Show More
@@ -1,729 +1,712 b'' | |||||
1 | # __init__.py - fsmonitor initialization and overrides |
|
1 | # __init__.py - fsmonitor initialization and overrides | |
2 | # |
|
2 | # | |
3 | # Copyright 2013-2016 Facebook, Inc. |
|
3 | # Copyright 2013-2016 Facebook, Inc. | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | '''Faster status operations with the Watchman file monitor (EXPERIMENTAL) |
|
8 | '''Faster status operations with the Watchman file monitor (EXPERIMENTAL) | |
9 |
|
9 | |||
10 | Integrates the file-watching program Watchman with Mercurial to produce faster |
|
10 | Integrates the file-watching program Watchman with Mercurial to produce faster | |
11 | status results. |
|
11 | status results. | |
12 |
|
12 | |||
13 | On a particular Linux system, for a real-world repository with over 400,000 |
|
13 | On a particular Linux system, for a real-world repository with over 400,000 | |
14 | files hosted on ext4, vanilla `hg status` takes 1.3 seconds. On the same |
|
14 | files hosted on ext4, vanilla `hg status` takes 1.3 seconds. On the same | |
15 | system, with fsmonitor it takes about 0.3 seconds. |
|
15 | system, with fsmonitor it takes about 0.3 seconds. | |
16 |
|
16 | |||
17 | fsmonitor requires no configuration -- it will tell Watchman about your |
|
17 | fsmonitor requires no configuration -- it will tell Watchman about your | |
18 | repository as necessary. You'll need to install Watchman from |
|
18 | repository as necessary. You'll need to install Watchman from | |
19 | https://facebook.github.io/watchman/ and make sure it is in your PATH. |
|
19 | https://facebook.github.io/watchman/ and make sure it is in your PATH. | |
20 |
|
20 | |||
21 | The following configuration options exist: |
|
21 | The following configuration options exist: | |
22 |
|
22 | |||
23 | :: |
|
23 | :: | |
24 |
|
24 | |||
25 | [fsmonitor] |
|
25 | [fsmonitor] | |
26 | mode = {off, on, paranoid} |
|
26 | mode = {off, on, paranoid} | |
27 |
|
27 | |||
28 | When `mode = off`, fsmonitor will disable itself (similar to not loading the |
|
28 | When `mode = off`, fsmonitor will disable itself (similar to not loading the | |
29 | extension at all). When `mode = on`, fsmonitor will be enabled (the default). |
|
29 | extension at all). When `mode = on`, fsmonitor will be enabled (the default). | |
30 | When `mode = paranoid`, fsmonitor will query both Watchman and the filesystem, |
|
30 | When `mode = paranoid`, fsmonitor will query both Watchman and the filesystem, | |
31 | and ensure that the results are consistent. |
|
31 | and ensure that the results are consistent. | |
32 |
|
32 | |||
33 | :: |
|
33 | :: | |
34 |
|
34 | |||
35 | [fsmonitor] |
|
35 | [fsmonitor] | |
36 | timeout = (float) |
|
36 | timeout = (float) | |
37 |
|
37 | |||
38 | A value, in seconds, that determines how long fsmonitor will wait for Watchman |
|
38 | A value, in seconds, that determines how long fsmonitor will wait for Watchman | |
39 | to return results. Defaults to `2.0`. |
|
39 | to return results. Defaults to `2.0`. | |
40 |
|
40 | |||
41 | :: |
|
41 | :: | |
42 |
|
42 | |||
43 | [fsmonitor] |
|
43 | [fsmonitor] | |
44 | blacklistusers = (list of userids) |
|
44 | blacklistusers = (list of userids) | |
45 |
|
45 | |||
46 | A list of usernames for which fsmonitor will disable itself altogether. |
|
46 | A list of usernames for which fsmonitor will disable itself altogether. | |
47 |
|
47 | |||
48 | :: |
|
48 | :: | |
49 |
|
49 | |||
50 | [fsmonitor] |
|
50 | [fsmonitor] | |
51 | walk_on_invalidate = (boolean) |
|
51 | walk_on_invalidate = (boolean) | |
52 |
|
52 | |||
53 | Whether or not to walk the whole repo ourselves when our cached state has been |
|
53 | Whether or not to walk the whole repo ourselves when our cached state has been | |
54 | invalidated, for example when Watchman has been restarted or .hgignore rules |
|
54 | invalidated, for example when Watchman has been restarted or .hgignore rules | |
55 | have been changed. Walking the repo in that case can result in competing for |
|
55 | have been changed. Walking the repo in that case can result in competing for | |
56 | I/O with Watchman. For large repos it is recommended to set this value to |
|
56 | I/O with Watchman. For large repos it is recommended to set this value to | |
57 | false. You may wish to set this to true if you have a very fast filesystem |
|
57 | false. You may wish to set this to true if you have a very fast filesystem | |
58 | that can outpace the IPC overhead of getting the result data for the full repo |
|
58 | that can outpace the IPC overhead of getting the result data for the full repo | |
59 | from Watchman. Defaults to false. |
|
59 | from Watchman. Defaults to false. | |
60 |
|
60 | |||
61 | fsmonitor is incompatible with the largefiles and eol extensions, and |
|
61 | fsmonitor is incompatible with the largefiles and eol extensions, and | |
62 | will disable itself if any of those are active. |
|
62 | will disable itself if any of those are active. | |
63 |
|
63 | |||
64 | ''' |
|
64 | ''' | |
65 |
|
65 | |||
66 | # Platforms Supported |
|
66 | # Platforms Supported | |
67 | # =================== |
|
67 | # =================== | |
68 | # |
|
68 | # | |
69 | # **Linux:** *Stable*. Watchman and fsmonitor are both known to work reliably, |
|
69 | # **Linux:** *Stable*. Watchman and fsmonitor are both known to work reliably, | |
70 | # even under severe loads. |
|
70 | # even under severe loads. | |
71 | # |
|
71 | # | |
72 | # **Mac OS X:** *Stable*. The Mercurial test suite passes with fsmonitor |
|
72 | # **Mac OS X:** *Stable*. The Mercurial test suite passes with fsmonitor | |
73 | # turned on, on case-insensitive HFS+. There has been a reasonable amount of |
|
73 | # turned on, on case-insensitive HFS+. There has been a reasonable amount of | |
74 | # user testing under normal loads. |
|
74 | # user testing under normal loads. | |
75 | # |
|
75 | # | |
76 | # **Solaris, BSD:** *Alpha*. watchman and fsmonitor are believed to work, but |
|
76 | # **Solaris, BSD:** *Alpha*. watchman and fsmonitor are believed to work, but | |
77 | # very little testing has been done. |
|
77 | # very little testing has been done. | |
78 | # |
|
78 | # | |
79 | # **Windows:** *Alpha*. Not in a release version of watchman or fsmonitor yet. |
|
79 | # **Windows:** *Alpha*. Not in a release version of watchman or fsmonitor yet. | |
80 | # |
|
80 | # | |
81 | # Known Issues |
|
81 | # Known Issues | |
82 | # ============ |
|
82 | # ============ | |
83 | # |
|
83 | # | |
84 | # * fsmonitor will disable itself if any of the following extensions are |
|
84 | # * fsmonitor will disable itself if any of the following extensions are | |
85 | # enabled: largefiles, inotify, eol; or if the repository has subrepos. |
|
85 | # enabled: largefiles, inotify, eol; or if the repository has subrepos. | |
86 | # * fsmonitor will produce incorrect results if nested repos that are not |
|
86 | # * fsmonitor will produce incorrect results if nested repos that are not | |
87 | # subrepos exist. *Workaround*: add nested repo paths to your `.hgignore`. |
|
87 | # subrepos exist. *Workaround*: add nested repo paths to your `.hgignore`. | |
88 | # |
|
88 | # | |
89 | # The issues related to nested repos and subrepos are probably not fundamental |
|
89 | # The issues related to nested repos and subrepos are probably not fundamental | |
90 | # ones. Patches to fix them are welcome. |
|
90 | # ones. Patches to fix them are welcome. | |
91 |
|
91 | |||
92 | from __future__ import absolute_import |
|
92 | from __future__ import absolute_import | |
93 |
|
93 | |||
94 | import codecs |
|
94 | import codecs | |
95 | import hashlib |
|
95 | import hashlib | |
96 | import os |
|
96 | import os | |
97 | import stat |
|
97 | import stat | |
98 | import sys |
|
98 | import sys | |
99 |
|
99 | |||
100 | from mercurial.i18n import _ |
|
100 | from mercurial.i18n import _ | |
101 | from mercurial import ( |
|
101 | from mercurial import ( | |
102 | context, |
|
102 | context, | |
103 | encoding, |
|
103 | encoding, | |
104 | error, |
|
104 | error, | |
105 | extensions, |
|
105 | extensions, | |
106 | localrepo, |
|
106 | localrepo, | |
107 | merge, |
|
107 | merge, | |
108 | pathutil, |
|
108 | pathutil, | |
109 | pycompat, |
|
109 | pycompat, | |
110 | scmutil, |
|
110 | scmutil, | |
111 | util, |
|
111 | util, | |
112 | ) |
|
112 | ) | |
113 | from mercurial import match as matchmod |
|
113 | from mercurial import match as matchmod | |
114 |
|
114 | |||
115 | from . import ( |
|
115 | from . import ( | |
116 | pywatchman, |
|
116 | pywatchman, | |
117 | state, |
|
117 | state, | |
118 | watchmanclient, |
|
118 | watchmanclient, | |
119 | ) |
|
119 | ) | |
120 |
|
120 | |||
121 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
121 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for | |
122 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
122 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should | |
123 | # be specifying the version(s) of Mercurial they are tested with, or |
|
123 | # be specifying the version(s) of Mercurial they are tested with, or | |
124 | # leave the attribute unspecified. |
|
124 | # leave the attribute unspecified. | |
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

# This extension is incompatible with the following blacklisted extensions
# and will disable itself when encountering one of these:
_blacklist = ['largefiles', 'eol']
130 |
|
130 | |||
def _handleunavailable(ui, state, ex):
    """Exception handler for Watchman interaction exceptions.

    Watchman-unavailable errors may warn the user and/or invalidate the
    cached fsmonitor state (per the flags on the exception); every other
    exception is merely logged.
    """
    if not isinstance(ex, watchmanclient.Unavailable):
        ui.log('fsmonitor', 'Watchman exception: %s\n', ex)
        return
    if ex.warn:
        ui.warn(str(ex) + '\n')
    if ex.invalidate:
        state.invalidate()
    ui.log('fsmonitor', 'Watchman unavailable: %s\n', ex.msg)
141 |
|
141 | |||
142 | def _hashignore(ignore): |
|
142 | def _hashignore(ignore): | |
143 | """Calculate hash for ignore patterns and filenames |
|
143 | """Calculate hash for ignore patterns and filenames | |
144 |
|
144 | |||
145 | If this information changes between Mercurial invocations, we can't |
|
145 | If this information changes between Mercurial invocations, we can't | |
146 | rely on Watchman information anymore and have to re-scan the working |
|
146 | rely on Watchman information anymore and have to re-scan the working | |
147 | copy. |
|
147 | copy. | |
148 |
|
148 | |||
149 | """ |
|
149 | """ | |
150 | sha1 = hashlib.sha1() |
|
150 | sha1 = hashlib.sha1() | |
151 | sha1.update(repr(ignore)) |
|
151 | sha1.update(repr(ignore)) | |
152 | return sha1.hexdigest() |
|
152 | return sha1.hexdigest() | |
153 |
|
153 | |||
# Watchman reports paths in its own "local encoding", which may differ from
# the encoding this process uses for filesystem paths. Compute both once at
# import time, and record whether a re-encode is needed for each path
# received from Watchman (see _watchmantofsencoding).
_watchmanencoding = pywatchman.encoding.get_local_encoding()
_fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
_fixencoding = codecs.lookup(_watchmanencoding) != codecs.lookup(_fsencoding)
157 |
|
157 | |||
def _watchmantofsencoding(path):
    """Fix path to match watchman and local filesystem encoding

    watchman's paths encoding can differ from filesystem encoding. For example,
    on Windows, it's always utf-8.

    Aborts with an error message if the path cannot be represented in
    either encoding.
    """
    try:
        unicodepath = path.decode(_watchmanencoding)
    except UnicodeDecodeError as e:
        raise error.Abort(str(e), hint='watchman encoding error')

    try:
        return unicodepath.encode(_fsencoding, 'strict')
    except UnicodeEncodeError as e:
        raise error.Abort(str(e))
175 |
|
175 | |||
def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True):
    '''Replacement for dirstate.walk, hooking into Watchman.

    Whenever full is False, ignored is False, and the Watchman client is
    available, use Watchman combined with saved state to possibly return only a
    subset of files.

    ``orig`` is the wrapped dirstate.walk; ``self`` is the dirstate
    instance. Any condition that prevents using Watchman (unavailable
    client, invalidated state with walk_on_invalidate set, query failure)
    falls back to calling ``orig`` via ``bail()``.
    '''
    def bail():
        # Fall back to a regular full walk; note full=True is forced so the
        # fallback never re-enters the fsmonitor fast path.
        return orig(match, subrepos, unknown, ignored, full=True)

    if full or ignored or not self._watchmanclient.available():
        return bail()
    state = self._fsmonitorstate
    clock, ignorehash, notefiles = state.get()
    if not clock:
        if state.walk_on_invalidate:
            return bail()
        # Initial NULL clock value, see
        # https://facebook.github.io/watchman/docs/clockspec.html
        clock = 'c:0:0'
        notefiles = []

    # NOTE(review): fwarn and badtype are defined but not referenced in this
    # function body — presumably kept to mirror dirstate.walk; confirm before
    # removing.
    def fwarn(f, msg):
        self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
        return False

    def badtype(mode):
        # Translate an st_mode into a human-readable "unsupported type"
        # message, mirroring dirstate.walk's handling of special files.
        kind = _('unknown')
        if stat.S_ISCHR(mode):
            kind = _('character device')
        elif stat.S_ISBLK(mode):
            kind = _('block device')
        elif stat.S_ISFIFO(mode):
            kind = _('fifo')
        elif stat.S_ISSOCK(mode):
            kind = _('socket')
        elif stat.S_ISDIR(mode):
            kind = _('directory')
        return _('unsupported file type (type is %s)') % kind

    ignore = self._ignore
    dirignore = self._dirignore
    if unknown:
        if _hashignore(ignore) != ignorehash and clock != 'c:0:0':
            # ignore list changed -- can't rely on Watchman state any more
            if state.walk_on_invalidate:
                return bail()
            notefiles = []
            clock = 'c:0:0'
    else:
        # always ignore
        ignore = util.always
        dirignore = util.always

    matchfn = match.matchfn
    matchalways = match.always()
    dmap = self._map
    # _nonnormalset may not exist on older dirstate implementations; None
    # selects the full-dmap iteration path in step 3 below.
    nonnormalset = getattr(self, '_nonnormalset', None)

    copymap = self._copymap
    # Hoist stat-module lookups and bound methods out of the hot loop.
    getkind = stat.S_IFMT
    dirkind = stat.S_IFDIR
    regkind = stat.S_IFREG
    lnkkind = stat.S_IFLNK
    join = self._join
    normcase = util.normcase
    fresh_instance = False

    exact = skipstep3 = False
    if match.isexact():  # match.exact
        exact = True
        dirignore = util.always  # skip step 2
    elif match.prefix():  # match.match, no patterns
        skipstep3 = True

    if not exact and self._checkcase:
        # note that even though we could receive directory entries, we're only
        # interested in checking if a file with the same name exists. So only
        # normalize files if possible.
        normalize = self._normalizefile
        skipstep3 = False
    else:
        normalize = None

    # step 1: find all explicit files
    results, work, dirsnotfound = self._walkexplicit(match, subrepos)

    skipstep3 = skipstep3 and not (work or dirsnotfound)
    work = [d for d in work if not dirignore(d[0])]

    if not work and (exact or skipstep3):
        # Nothing left to examine; strip bookkeeping entries before
        # returning, as dirstate.walk does.
        for s in subrepos:
            del results[s]
        del results['.hg']
        return results

    # step 2: query Watchman
    try:
        # Use the user-configured timeout for the query.
        # Add a little slack over the top of the user query to allow for
        # overheads while transferring the data
        self._watchmanclient.settimeout(state.timeout + 0.1)
        result = self._watchmanclient.command('query', {
            'fields': ['mode', 'mtime', 'size', 'exists', 'name'],
            'since': clock,
            # Exclude everything under .hg, and the .hg entry itself.
            'expression': [
                'not', [
                    'anyof', ['dirname', '.hg'],
                    ['name', '.hg', 'wholename']
                ]
            ],
            'sync_timeout': int(state.timeout * 1000),
            'empty_on_fresh_instance': state.walk_on_invalidate,
        })
    except Exception as ex:
        _handleunavailable(self._ui, state, ex)
        self._watchmanclient.clearconnection()
        return bail()
    else:
        # We need to propagate the last observed clock up so that we
        # can use it for our next query
        state.setlastclock(result['clock'])
        if result['is_fresh_instance']:
            # Watchman restarted since our saved clock: its answer does not
            # cover changes made while it was down.
            if state.walk_on_invalidate:
                state.invalidate()
                return bail()
            fresh_instance = True
            # Ignore any prior noteable files from the state info
            notefiles = []

    # for file paths which require normalization and we encounter a case
    # collision, we store our own foldmap
    if normalize:
        foldmap = dict((normcase(k), k) for k in results)

    switch_slashes = pycompat.ossep == '\\'
    # The order of the results is, strictly speaking, undefined.
    # For case changes on a case insensitive filesystem we may receive
    # two entries, one with exists=True and another with exists=False.
    # The exists=True entries in the same response should be interpreted
    # as being happens-after the exists=False entries due to the way that
    # Watchman tracks files.  We use this property to reconcile deletes
    # for name case changes.
    for entry in result['files']:
        fname = entry['name']
        if _fixencoding:
            fname = _watchmantofsencoding(fname)
        if switch_slashes:
            fname = fname.replace('\\', '/')
        if normalize:
            normed = normcase(fname)
            fname = normalize(fname, True, True)
            foldmap[normed] = fname
        fmode = entry['mode']
        fexists = entry['exists']
        kind = getkind(fmode)

        if not fexists:
            # if marked as deleted and we don't already have a change
            # record, mark it as deleted.  If we already have an entry
            # for fname then it was either part of walkexplicit or was
            # an earlier result that was a case change
            if fname not in results and fname in dmap and (
                    matchalways or matchfn(fname)):
                results[fname] = None
        elif kind == dirkind:
            # Directories only matter if the dirstate tracked a file by
            # that name (a file was replaced by a directory).
            if fname in dmap and (matchalways or matchfn(fname)):
                results[fname] = None
        elif kind == regkind or kind == lnkkind:
            if fname in dmap:
                if matchalways or matchfn(fname):
                    results[fname] = entry
            elif (matchalways or matchfn(fname)) and not ignore(fname):
                # Untracked, matching, and not ignored: report as-is.
                results[fname] = entry
        elif fname in dmap and (matchalways or matchfn(fname)):
            # Tracked file replaced by an unsupported type (fifo, socket,
            # device): report it as missing, like a deletion.
            results[fname] = None

    # step 3: query notable files we don't already know about
    # XXX try not to iterate over the entire dmap
    if normalize:
        # any notable files that have changed case will already be handled
        # above, so just check membership in the foldmap
        notefiles = set((normalize(f, True, True) for f in notefiles
                         if normcase(f) not in foldmap))
    visit = set((f for f in notefiles if (f not in results and matchfn(f)
                                          and (f in dmap or not ignore(f)))))

    if nonnormalset is not None and not fresh_instance:
        # Fast path: only files the dirstate already flags as non-normal
        # (plus copy sources) can differ without Watchman having told us.
        if matchalways:
            visit.update(f for f in nonnormalset if f not in results)
            visit.update(f for f in copymap if f not in results)
        else:
            visit.update(f for f in nonnormalset
                         if f not in results and matchfn(f))
            visit.update(f for f in copymap
                         if f not in results and matchfn(f))
    else:
        # Slow path: scan the whole dmap. st is the dirstate tuple; st[0] is
        # the state char ('n' == normal) and st[2] the recorded size (< 0
        # marks entries needing a lookup). On a fresh Watchman instance every
        # entry must be revisited.
        if matchalways:
            visit.update(f for f, st in dmap.iteritems()
                         if (f not in results and
                             (st[2] < 0 or st[0] != 'n' or fresh_instance)))
            visit.update(f for f in copymap if f not in results)
        else:
            visit.update(f for f, st in dmap.iteritems()
                         if (f not in results and
                             (st[2] < 0 or st[0] != 'n' or fresh_instance)
                             and matchfn(f)))
            visit.update(f for f in copymap
                         if f not in results and matchfn(f))

    # Paths failing the audit (e.g. escaping the repo or crossing a symlink)
    # are reported as missing rather than statted.
    audit = pathutil.pathauditor(self._root).check
    auditpass = [f for f in visit if audit(f)]
    auditpass.sort()
    auditfail = visit.difference(auditpass)
    for f in auditfail:
        results[f] = None

    # statfiles yields results in input order, so pairing with the sorted
    # auditpass list via the iterator keeps names and stats aligned.
    # NOTE(review): iter(...).next is Python 2 only; py3 would need next().
    nf = iter(auditpass).next
    for st in util.statfiles([join(f) for f in auditpass]):
        f = nf()
        if st or f in dmap:
            results[f] = st

    for s in subrepos:
        del results[s]
    del results['.hg']
    return results
402 |
|
402 | |||
def overridestatus(
        orig, self, node1='.', node2=None, match=None, ignored=False,
        clean=False, unknown=False, listsubrepos=False):
    """Wrapper for localrepository.status that consults Watchman.

    Queries the Watchman clock before delegating to ``orig`` so that, when
    safe (``updatestate``), the fsmonitor state can be refreshed with the
    files that status reported.  In 'paranoid' mode the status is computed
    a second time with fsmonitor disabled and both results are compared.
    """
    listignored = ignored
    listclean = clean
    listunknown = unknown

    def _cmpsets(l1, l2):
        # Log any mismatch between the fsmonitor-assisted result (l1) and
        # the plain-walk result (l2); used only in 'paranoid' mode below.
        try:
            if 'FSMONITOR_LOG_FILE' in encoding.environ:
                fn = encoding.environ['FSMONITOR_LOG_FILE']
                f = open(fn, 'wb')
            else:
                fn = 'fsmonitorfail.log'
                f = self.opener(fn, 'wb')
        except (IOError, OSError):
            self.ui.warn(_('warning: unable to write to %s\n') % fn)
            return

        try:
            for i, (s1, s2) in enumerate(zip(l1, l2)):
                if set(s1) != set(s2):
                    f.write('sets at position %d are unequal\n' % i)
                    f.write('watchman returned: %s\n' % s1)
                    f.write('stat returned: %s\n' % s2)
        finally:
            f.close()

    if isinstance(node1, context.changectx):
        ctx1 = node1
    else:
        ctx1 = self[node1]
    if isinstance(node2, context.changectx):
        ctx2 = node2
    else:
        ctx2 = self[node2]

    working = ctx2.rev() is None
    parentworking = working and ctx1 == self['.']
    match = match or matchmod.always(self.root, self.getcwd())

    # Maybe we can use this opportunity to update Watchman's state.
    # Mercurial uses workingcommitctx and/or memctx to represent the part of
    # the workingctx that is to be committed. So don't update the state in
    # that case.
    # HG_PENDING is set in the environment when the dirstate is being updated
    # in the middle of a transaction; we must not update our state in that
    # case, or we risk forgetting about changes in the working copy.
    updatestate = (parentworking and match.always() and
                   not isinstance(ctx2, (context.workingcommitctx,
                                         context.memctx)) and
                   'HG_PENDING' not in encoding.environ)

    try:
        if self._fsmonitorstate.walk_on_invalidate:
            # Use a short timeout to query the current clock.  If that
            # takes too long then we assume that the service will be slow
            # to answer our query.
            # walk_on_invalidate indicates that we prefer to walk the
            # tree ourselves because we can ignore portions that Watchman
            # cannot and we tend to be faster in the warmer buffer cache
            # cases.
            self._watchmanclient.settimeout(0.1)
        else:
            # Give Watchman more time to potentially complete its walk
            # and return the initial clock.  In this mode we assume that
            # the filesystem will be slower than parsing a potentially
            # very large Watchman result set.
            self._watchmanclient.settimeout(
                self._fsmonitorstate.timeout + 0.1)
        startclock = self._watchmanclient.getcurrentclock()
    except Exception as ex:
        self._watchmanclient.clearconnection()
        _handleunavailable(self.ui, self._fsmonitorstate, ex)
        # boo, Watchman failed. bail
        return orig(node1, node2, match, listignored, listclean,
                    listunknown, listsubrepos)

    if updatestate:
        # We need info about unknown files. This may make things slower the
        # first time, but whatever.
        stateunknown = True
    else:
        stateunknown = listunknown

    r = orig(node1, node2, match, listignored, listclean, stateunknown,
             listsubrepos)
    modified, added, removed, deleted, unknown, ignored, clean = r

    if updatestate:
        # Record the clock observed before the walk plus every file status
        # flagged, so the next query can be incremental.
        notefiles = modified + added + removed + deleted + unknown
        self._fsmonitorstate.set(
            self._fsmonitorstate.getlastclock() or startclock,
            _hashignore(self.dirstate._ignore),
            notefiles)

    if not listunknown:
        # unknowns were only gathered for the state update above
        unknown = []

    # don't do paranoid checks if we're not going to query Watchman anyway
    full = listclean or match.traversedir is not None
    if self._fsmonitorstate.mode == 'paranoid' and not full:
        # run status again and fall back to the old walk this time
        self.dirstate._fsmonitordisable = True

        # shut the UI up
        quiet = self.ui.quiet
        self.ui.quiet = True
        fout, ferr = self.ui.fout, self.ui.ferr
        self.ui.fout = self.ui.ferr = open(os.devnull, 'wb')

        try:
            rv2 = orig(
                node1, node2, match, listignored, listclean, listunknown,
                listsubrepos)
        finally:
            self.dirstate._fsmonitordisable = False
            self.ui.quiet = quiet
            self.ui.fout, self.ui.ferr = fout, ferr

        # clean isn't tested since it's set to True above
        _cmpsets([modified, added, removed, deleted, unknown, ignored, clean],
                 rv2)
        modified, added, removed, deleted, unknown, ignored, clean = rv2

    return scmutil.status(
        modified, added, removed, deleted, unknown, ignored, clean)
530 |
|
530 | |||
def makedirstate(cls):
    """Return a subclass of ``cls`` with fsmonitor hooks attached.

    ``walk`` is routed through ``overridewalk`` unless ``_fsmonitordisable``
    is set; ``rebuild`` and ``invalidate`` additionally invalidate the saved
    fsmonitor state before delegating to the base class.
    """
    class fsmonitordirstate(cls):
        def _fsmonitorinit(self, fsmonitorstate, watchmanclient):
            """Attach fsmonitor state and watchman client to this dirstate."""
            # _fsmonitordisable is used in paranoid mode
            self._fsmonitordisable = False
            self._fsmonitorstate = fsmonitorstate
            self._watchmanclient = watchmanclient

        def walk(self, *args, **kwargs):
            baseline = super(fsmonitordirstate, self).walk
            if not self._fsmonitordisable:
                return overridewalk(baseline, self, *args, **kwargs)
            return baseline(*args, **kwargs)

        def rebuild(self, *args, **kwargs):
            # a rebuilt dirstate makes any previously saved state stale
            self._fsmonitorstate.invalidate()
            return super(fsmonitordirstate, self).rebuild(*args, **kwargs)

        def invalidate(self, *args, **kwargs):
            self._fsmonitorstate.invalidate()
            return super(fsmonitordirstate, self).invalidate(*args, **kwargs)

    return fsmonitordirstate
554 |
|
554 | |||
def wrapdirstate(orig, self):
    """Return the repo's dirstate, upgraded to a fsmonitor-aware class.

    The dirstate class is only swapped when Watchman is available for the
    repository, which is signalled by the presence of ``_fsmonitorstate``.
    """
    dirstate = orig(self)
    if not util.safehasattr(self, '_fsmonitorstate'):
        return dirstate
    dirstate.__class__ = makedirstate(dirstate.__class__)
    dirstate._fsmonitorinit(self._fsmonitorstate, self._watchmanclient)
    return dirstate
562 |
|
562 | |||
def extsetup(ui):
    """Install fsmonitor's wrappers when the extension is loaded.

    The dirstate filecache property is wrapped so repositories get a
    fsmonitor-aware dirstate, and merge.update is bracketed with watchman
    state signals.  On darwin, os.symlink is additionally wrapped to work
    around a fsevents quirk with dangling symlinks.
    """
    extensions.wrapfilecache(
        localrepo.localrepository, 'dirstate', wrapdirstate)
    # An assist for avoiding the dangling-symlink fsevents bug
    if pycompat.sysplatform == 'darwin':
        extensions.wrapfunction(os, 'symlink', wrapsymlink)
    extensions.wrapfunction(merge, 'update', wrapupdate)
570 |
|
571 | |||
def wrapsymlink(orig, source, link_name):
    """Create a symlink, then touch the link's parent directory.

    If we create a dangling symlink, fsevents may fail to notice it;
    updating the parent directory's mtime encourages notifications to
    work more correctly.
    """
    try:
        return orig(source, link_name)
    finally:
        parent = os.path.dirname(link_name)
        try:
            os.utime(parent, None)
        except OSError:
            # best effort only; the parent may not be touchable
            pass
581 |
|
582 | |||
class state_update(object):
    ''' This context manager is responsible for dispatching the state-enter
    and state-leave signals to the watchman service '''

    def __init__(self, repo, node, distance, partial):
        # target node of the working-copy update
        self.repo = repo
        self.node = node
        # approximate number of commits between current and target
        self.distance = distance
        # whether only part of the working copy is being updated
        self.partial = partial
        self._lock = None
        # set by __enter__: only send state-leave if state-enter succeeded
        self.need_leave = False

    def __enter__(self):
        # We explicitly need to take a lock here, before we proceed to update
        # watchman about the update operation, so that we don't race with
        # some other actor.  merge.update is going to take the wlock almost
        # immediately anyway, so this is effectively extending the lock
        # around a couple of short sanity checks.
        self._lock = self.repo.wlock()
        self.need_leave = self._state('state-enter')
        return self

    def __exit__(self, type_, value, tb):
        try:
            if self.need_leave:
                # report failure to watchman if the body raised
                status = 'ok' if type_ is None else 'failed'
                self._state('state-leave', status=status)
        finally:
            # always release the wlock taken in __enter__
            if self._lock:
                self._lock.release()

    def _state(self, cmd, status='ok'):
        """Send ``cmd`` (state-enter/state-leave) to watchman.

        Returns True on success, False when watchman is unavailable or the
        command fails; errors are deliberately swallowed.
        """
        if not util.safehasattr(self.repo, '_watchmanclient'):
            return False
        try:
            commithash = self.repo[self.node].hex()
            self.repo._watchmanclient.command(cmd, {
                'name': 'hg.update',
                'metadata': {
                    # the target revision
                    'rev': commithash,
                    # approximate number of commits between current and target
                    'distance': self.distance,
                    # success/failure (only really meaningful for state-leave)
                    'status': status,
                    # whether the working copy parent is changing
                    'partial': self.partial,
                }})
            return True
        except Exception as e:
            # Swallow any errors; fire and forget
            self.repo.ui.log(
                'watchman', 'Exception %s while running %s\n', e, cmd)
            return False
636 |
|
637 | |||
# Bracket working copy updates with calls to the watchman state-enter
# and state-leave commands. This allows clients to perform more intelligent
# settling during bulk file change scenarios
# https://facebook.github.io/watchman/docs/cmd/subscribe.html#advanced-settling
def wrapupdate(orig, repo, node, branchmerge, force, ancestor=None,
               mergeancestor=False, labels=None, matcher=None, **kwargs):
    """Run merge.update inside a watchman state_update bracket.

    ``partial`` is True when only a subset of the working copy is touched;
    ``distance`` approximates how many commits lie between the current
    parent(s) and the target node (0 when it cannot be computed).
    """
    partial = not (matcher is None or matcher.always())
    distance = 0
    if not partial:
        parents = repo[None].parents()
        if len(parents) == 2:
            anc = repo.changelog.ancestor(parents[0].node(), parents[1].node())
            distance = abs(repo[node].rev() - repo[anc].rev())
        elif len(parents) == 1:
            distance = abs(repo[node].rev() - parents[0].rev())

    with state_update(repo, node, distance, partial):
        return orig(
            repo, node, branchmerge, force, ancestor, mergeancestor,
            labels, matcher, **kwargs)
661 |
|
662 | |||
def reposetup(ui, repo):
    """Per-repository setup: attach fsmonitor state and wrap status.

    Bails out (leaving the repo untouched) when a blacklisted extension is
    enabled, the repo has subrepos, fsmonitor mode is 'off', or the
    watchman client cannot be created.
    """
    # We don't work with largefiles or inotify
    exts = extensions.enabled()
    for ext in _blacklist:
        if ext in exts:
            ui.warn(_('The fsmonitor extension is incompatible with the %s '
                      'extension and has been disabled.\n') % ext)
            return

    if util.safehasattr(repo, 'dirstate'):
        # We don't work with subrepos either. Note that we can get passed in
        # e.g. a statichttprepo, which throws on trying to access the substate.
        # XXX This sucks.
        try:
            # if repo[None].substate can cause a dirstate parse, which is too
            # slow. Instead, look for a file called hgsubstate,
            if repo.wvfs.exists('.hgsubstate') or repo.wvfs.exists('.hgsub'):
                return
        except AttributeError:
            return

        fsmonitorstate = state.state(repo)
        if fsmonitorstate.mode == 'off':
            return

        try:
            client = watchmanclient.client(repo)
        except Exception as ex:
            _handleunavailable(ui, fsmonitorstate, ex)
            return

        repo._fsmonitorstate = fsmonitorstate
        repo._watchmanclient = client

        # at this point since fsmonitorstate wasn't present, repo.dirstate is
        # not a fsmonitordirstate
        dirstate = repo.dirstate
        dirstate.__class__ = makedirstate(dirstate.__class__)
        dirstate._fsmonitorinit(fsmonitorstate, client)
        # invalidate property cache, but keep filecache which contains the
        # wrapped dirstate object
        del repo.unfiltered().__dict__['dirstate']
        assert dirstate is repo._filecache['dirstate'].obj

        class fsmonitorrepo(repo.__class__):
            def status(self, *args, **kwargs):
                # route status through the fsmonitor-aware override
                orig = super(fsmonitorrepo, self).status
                return overridestatus(orig, self, *args, **kwargs)

        repo.__class__ = fsmonitorrepo
712 |
|
||||
def wrapfilecache(cls, propname, wrapper):
    """Wraps a filecache property.

    filecache-managed properties can't be wrapped using the normal
    wrapfunction machinery; instead, the cached entry's ``func`` slot is
    patched in place.  This should eventually go into upstream Mercurial.

    Raises AttributeError when no class in the MRO defines ``propname``.
    """
    assert callable(wrapper)
    for klass in cls.__mro__:
        if propname not in klass.__dict__:
            continue
        origfn = klass.__dict__[propname].func
        assert callable(origfn)
        def wrap(*args, **kwargs):
            return wrapper(origfn, *args, **kwargs)
        klass.__dict__[propname].func = wrap
        break

    if klass is object:
        raise AttributeError(
            _("type '%s' has no property '%s'") % (cls, propname))
|
@@ -1,578 +1,597 b'' | |||||
1 | # extensions.py - extension handling for mercurial |
|
1 | # extensions.py - extension handling for mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | import imp |
|
10 | import imp | |
11 | import inspect |
|
11 | import inspect | |
12 | import os |
|
12 | import os | |
13 |
|
13 | |||
14 | from .i18n import ( |
|
14 | from .i18n import ( | |
15 | _, |
|
15 | _, | |
16 | gettext, |
|
16 | gettext, | |
17 | ) |
|
17 | ) | |
18 |
|
18 | |||
19 | from . import ( |
|
19 | from . import ( | |
20 | cmdutil, |
|
20 | cmdutil, | |
21 | encoding, |
|
21 | encoding, | |
22 | error, |
|
22 | error, | |
23 | pycompat, |
|
23 | pycompat, | |
24 | util, |
|
24 | util, | |
25 | ) |
|
25 | ) | |
26 |
|
26 | |||
# name -> loaded module; a falsy value marks an extension that failed to
# load (see find() and extensions() below, which skip falsy entries)
_extensions = {}
# extensions disabled via configuration
# NOTE(review): exact value semantics not visible here -- confirm at call sites
_disabledextensions = {}
# callbacks to run once a named extension has loaded
# NOTE(review): inferred from the name; verify against the registration code
_aftercallbacks = {}
# extension names in load order (iterated by extensions())
_order = []
# names silently treated as built in; presumably absorbed into core Mercurial
_builtin = {'hbisect', 'bookmarks', 'parentrevspec', 'progress', 'interhg',
            'inotify', 'hgcia'}
33 |
|
33 | |||
def extensions(ui=None):
    """Yield (name, module) pairs for all loaded extensions, in load order.

    When a ui is supplied, only extensions still enabled in its
    configuration (not '!'-prefixed) are yielded.
    """
    if ui:
        def enabled(name):
            for fmt in ['%s', 'hgext.%s']:
                conf = ui.config('extensions', fmt % name)
                if conf is not None and not conf.startswith('!'):
                    return True
    else:
        def enabled(name):
            return True
    for name in _order:
        module = _extensions[name]
        if module and enabled(name):
            yield name, module
47 |
|
47 | |||
def find(name):
    '''return module with given extension name'''
    try:
        mod = _extensions[name]
    except KeyError:
        # fall back to matching a dotted or path-style suffix
        mod = None
        for fullname, module in _extensions.iteritems():
            if fullname.endswith('.' + name) or fullname.endswith('/' + name):
                mod = module
                break
    if not mod:
        raise KeyError(name)
    return mod
61 |
|
61 | |||
def loadpath(path, module_name):
    """Load and return the module at ``path`` under ``module_name``.

    Handles both a package directory (module/__init__.py style) and a
    single source file.
    """
    module_name = module_name.replace('.', '_')
    path = util.normpath(util.expandpath(path))
    module_name = pycompat.fsdecode(module_name)
    path = pycompat.fsdecode(path)
    if os.path.isdir(path):
        # module/__init__.py style
        dirname, basename = os.path.split(path)
        fd, fpath, desc = imp.find_module(basename, [dirname])
        return imp.load_module(module_name, fd, fpath, desc)
    try:
        return imp.load_source(module_name, path)
    except IOError as exc:
        if not exc.filename:
            exc.filename = path  # python does not fill this
        raise
79 |
|
79 | |||
def _importh(name):
    """import and return the <name> module"""
    # __import__ returns the top-level package; descend to the leaf module
    module = __import__(pycompat.sysstr(name))
    for part in name.split('.')[1:]:
        module = getattr(module, part)
    return module
87 |
|
87 | |||
def _importext(name, path=None, reportfunc=None):
    """Import extension ``name``, optionally from an explicit ``path``.

    Without a path, the hgext, hgext3rd and top-level namespaces are tried
    in that order; each failed attempt is reported through ``reportfunc``
    when one is given.
    """
    if path:
        # the module will be loaded in sys.modules
        # choose an unique name so that it doesn't
        # conflicts with other modules
        return loadpath(path, 'hgext.%s' % name)
    try:
        return _importh("hgext.%s" % name)
    except ImportError as err:
        if reportfunc:
            reportfunc(err, "hgext.%s" % name, "hgext3rd.%s" % name)
        try:
            return _importh("hgext3rd.%s" % name)
        except ImportError as err:
            if reportfunc:
                reportfunc(err, "hgext3rd.%s" % name, name)
            return _importh(name)
107 |
|
107 | |||
108 | def _forbytes(inst): |
|
108 | def _forbytes(inst): | |
109 | """Portably format an import error into a form suitable for |
|
109 | """Portably format an import error into a form suitable for | |
110 | %-formatting into bytestrings.""" |
|
110 | %-formatting into bytestrings.""" | |
111 | return encoding.strtolocal(str(inst)) |
|
111 | return encoding.strtolocal(str(inst)) | |
112 |
|
112 | |||
113 | def _reportimporterror(ui, err, failed, next): |
|
113 | def _reportimporterror(ui, err, failed, next): | |
114 | # note: this ui.debug happens before --debug is processed, |
|
114 | # note: this ui.debug happens before --debug is processed, | |
115 | # Use --config ui.debug=1 to see them. |
|
115 | # Use --config ui.debug=1 to see them. | |
116 | ui.debug('could not import %s (%s): trying %s\n' |
|
116 | ui.debug('could not import %s (%s): trying %s\n' | |
117 | % (failed, _forbytes(err), next)) |
|
117 | % (failed, _forbytes(err), next)) | |
118 | if ui.debugflag: |
|
118 | if ui.debugflag: | |
119 | ui.traceback() |
|
119 | ui.traceback() | |
120 |
|
120 | |||
121 | # attributes set by registrar.command |
|
121 | # attributes set by registrar.command | |
122 | _cmdfuncattrs = ('norepo', 'optionalrepo', 'inferrepo') |
|
122 | _cmdfuncattrs = ('norepo', 'optionalrepo', 'inferrepo') | |
123 |
|
123 | |||
124 | def _validatecmdtable(ui, cmdtable): |
|
124 | def _validatecmdtable(ui, cmdtable): | |
125 | """Check if extension commands have required attributes""" |
|
125 | """Check if extension commands have required attributes""" | |
126 | for c, e in cmdtable.iteritems(): |
|
126 | for c, e in cmdtable.iteritems(): | |
127 | f = e[0] |
|
127 | f = e[0] | |
128 | if getattr(f, '_deprecatedregistrar', False): |
|
128 | if getattr(f, '_deprecatedregistrar', False): | |
129 | ui.deprecwarn("cmdutil.command is deprecated, use " |
|
129 | ui.deprecwarn("cmdutil.command is deprecated, use " | |
130 | "registrar.command to register '%s'" % c, '4.6') |
|
130 | "registrar.command to register '%s'" % c, '4.6') | |
131 | missing = [a for a in _cmdfuncattrs if not util.safehasattr(f, a)] |
|
131 | missing = [a for a in _cmdfuncattrs if not util.safehasattr(f, a)] | |
132 | if not missing: |
|
132 | if not missing: | |
133 | continue |
|
133 | continue | |
134 | raise error.ProgrammingError( |
|
134 | raise error.ProgrammingError( | |
135 | 'missing attributes: %s' % ', '.join(missing), |
|
135 | 'missing attributes: %s' % ', '.join(missing), | |
136 | hint="use @command decorator to register '%s'" % c) |
|
136 | hint="use @command decorator to register '%s'" % c) | |
137 |
|
137 | |||
138 | def load(ui, name, path): |
|
138 | def load(ui, name, path): | |
139 | if name.startswith('hgext.') or name.startswith('hgext/'): |
|
139 | if name.startswith('hgext.') or name.startswith('hgext/'): | |
140 | shortname = name[6:] |
|
140 | shortname = name[6:] | |
141 | else: |
|
141 | else: | |
142 | shortname = name |
|
142 | shortname = name | |
143 | if shortname in _builtin: |
|
143 | if shortname in _builtin: | |
144 | return None |
|
144 | return None | |
145 | if shortname in _extensions: |
|
145 | if shortname in _extensions: | |
146 | return _extensions[shortname] |
|
146 | return _extensions[shortname] | |
147 | _extensions[shortname] = None |
|
147 | _extensions[shortname] = None | |
148 | mod = _importext(name, path, bind(_reportimporterror, ui)) |
|
148 | mod = _importext(name, path, bind(_reportimporterror, ui)) | |
149 |
|
149 | |||
150 | # Before we do anything with the extension, check against minimum stated |
|
150 | # Before we do anything with the extension, check against minimum stated | |
151 | # compatibility. This gives extension authors a mechanism to have their |
|
151 | # compatibility. This gives extension authors a mechanism to have their | |
152 | # extensions short circuit when loaded with a known incompatible version |
|
152 | # extensions short circuit when loaded with a known incompatible version | |
153 | # of Mercurial. |
|
153 | # of Mercurial. | |
154 | minver = getattr(mod, 'minimumhgversion', None) |
|
154 | minver = getattr(mod, 'minimumhgversion', None) | |
155 | if minver and util.versiontuple(minver, 2) > util.versiontuple(n=2): |
|
155 | if minver and util.versiontuple(minver, 2) > util.versiontuple(n=2): | |
156 | ui.warn(_('(third party extension %s requires version %s or newer ' |
|
156 | ui.warn(_('(third party extension %s requires version %s or newer ' | |
157 | 'of Mercurial; disabling)\n') % (shortname, minver)) |
|
157 | 'of Mercurial; disabling)\n') % (shortname, minver)) | |
158 | return |
|
158 | return | |
159 | _validatecmdtable(ui, getattr(mod, 'cmdtable', {})) |
|
159 | _validatecmdtable(ui, getattr(mod, 'cmdtable', {})) | |
160 |
|
160 | |||
161 | _extensions[shortname] = mod |
|
161 | _extensions[shortname] = mod | |
162 | _order.append(shortname) |
|
162 | _order.append(shortname) | |
163 | for fn in _aftercallbacks.get(shortname, []): |
|
163 | for fn in _aftercallbacks.get(shortname, []): | |
164 | fn(loaded=True) |
|
164 | fn(loaded=True) | |
165 | return mod |
|
165 | return mod | |
166 |
|
166 | |||
167 | def _runuisetup(name, ui): |
|
167 | def _runuisetup(name, ui): | |
168 | uisetup = getattr(_extensions[name], 'uisetup', None) |
|
168 | uisetup = getattr(_extensions[name], 'uisetup', None) | |
169 | if uisetup: |
|
169 | if uisetup: | |
170 | uisetup(ui) |
|
170 | uisetup(ui) | |
171 |
|
171 | |||
172 | def _runextsetup(name, ui): |
|
172 | def _runextsetup(name, ui): | |
173 | extsetup = getattr(_extensions[name], 'extsetup', None) |
|
173 | extsetup = getattr(_extensions[name], 'extsetup', None) | |
174 | if extsetup: |
|
174 | if extsetup: | |
175 | try: |
|
175 | try: | |
176 | extsetup(ui) |
|
176 | extsetup(ui) | |
177 | except TypeError: |
|
177 | except TypeError: | |
178 | if inspect.getargspec(extsetup).args: |
|
178 | if inspect.getargspec(extsetup).args: | |
179 | raise |
|
179 | raise | |
180 | extsetup() # old extsetup with no ui argument |
|
180 | extsetup() # old extsetup with no ui argument | |
181 |
|
181 | |||
182 | def loadall(ui, whitelist=None): |
|
182 | def loadall(ui, whitelist=None): | |
183 | result = ui.configitems("extensions") |
|
183 | result = ui.configitems("extensions") | |
184 | if whitelist is not None: |
|
184 | if whitelist is not None: | |
185 | result = [(k, v) for (k, v) in result if k in whitelist] |
|
185 | result = [(k, v) for (k, v) in result if k in whitelist] | |
186 | newindex = len(_order) |
|
186 | newindex = len(_order) | |
187 | for (name, path) in result: |
|
187 | for (name, path) in result: | |
188 | if path: |
|
188 | if path: | |
189 | if path[0:1] == '!': |
|
189 | if path[0:1] == '!': | |
190 | _disabledextensions[name] = path[1:] |
|
190 | _disabledextensions[name] = path[1:] | |
191 | continue |
|
191 | continue | |
192 | try: |
|
192 | try: | |
193 | load(ui, name, path) |
|
193 | load(ui, name, path) | |
194 | except Exception as inst: |
|
194 | except Exception as inst: | |
195 | msg = _forbytes(inst) |
|
195 | msg = _forbytes(inst) | |
196 | if path: |
|
196 | if path: | |
197 | ui.warn(_("*** failed to import extension %s from %s: %s\n") |
|
197 | ui.warn(_("*** failed to import extension %s from %s: %s\n") | |
198 | % (name, path, msg)) |
|
198 | % (name, path, msg)) | |
199 | else: |
|
199 | else: | |
200 | ui.warn(_("*** failed to import extension %s: %s\n") |
|
200 | ui.warn(_("*** failed to import extension %s: %s\n") | |
201 | % (name, msg)) |
|
201 | % (name, msg)) | |
202 | if isinstance(inst, error.Hint) and inst.hint: |
|
202 | if isinstance(inst, error.Hint) and inst.hint: | |
203 | ui.warn(_("*** (%s)\n") % inst.hint) |
|
203 | ui.warn(_("*** (%s)\n") % inst.hint) | |
204 | ui.traceback() |
|
204 | ui.traceback() | |
205 |
|
205 | |||
206 | for name in _order[newindex:]: |
|
206 | for name in _order[newindex:]: | |
207 | _runuisetup(name, ui) |
|
207 | _runuisetup(name, ui) | |
208 |
|
208 | |||
209 | for name in _order[newindex:]: |
|
209 | for name in _order[newindex:]: | |
210 | _runextsetup(name, ui) |
|
210 | _runextsetup(name, ui) | |
211 |
|
211 | |||
212 | # Call aftercallbacks that were never met. |
|
212 | # Call aftercallbacks that were never met. | |
213 | for shortname in _aftercallbacks: |
|
213 | for shortname in _aftercallbacks: | |
214 | if shortname in _extensions: |
|
214 | if shortname in _extensions: | |
215 | continue |
|
215 | continue | |
216 |
|
216 | |||
217 | for fn in _aftercallbacks[shortname]: |
|
217 | for fn in _aftercallbacks[shortname]: | |
218 | fn(loaded=False) |
|
218 | fn(loaded=False) | |
219 |
|
219 | |||
220 | # loadall() is called multiple times and lingering _aftercallbacks |
|
220 | # loadall() is called multiple times and lingering _aftercallbacks | |
221 | # entries could result in double execution. See issue4646. |
|
221 | # entries could result in double execution. See issue4646. | |
222 | _aftercallbacks.clear() |
|
222 | _aftercallbacks.clear() | |
223 |
|
223 | |||
224 | def afterloaded(extension, callback): |
|
224 | def afterloaded(extension, callback): | |
225 | '''Run the specified function after a named extension is loaded. |
|
225 | '''Run the specified function after a named extension is loaded. | |
226 |
|
226 | |||
227 | If the named extension is already loaded, the callback will be called |
|
227 | If the named extension is already loaded, the callback will be called | |
228 | immediately. |
|
228 | immediately. | |
229 |
|
229 | |||
230 | If the named extension never loads, the callback will be called after |
|
230 | If the named extension never loads, the callback will be called after | |
231 | all extensions have been loaded. |
|
231 | all extensions have been loaded. | |
232 |
|
232 | |||
233 | The callback receives the named argument ``loaded``, which is a boolean |
|
233 | The callback receives the named argument ``loaded``, which is a boolean | |
234 | indicating whether the dependent extension actually loaded. |
|
234 | indicating whether the dependent extension actually loaded. | |
235 | ''' |
|
235 | ''' | |
236 |
|
236 | |||
237 | if extension in _extensions: |
|
237 | if extension in _extensions: | |
238 | callback(loaded=True) |
|
238 | callback(loaded=True) | |
239 | else: |
|
239 | else: | |
240 | _aftercallbacks.setdefault(extension, []).append(callback) |
|
240 | _aftercallbacks.setdefault(extension, []).append(callback) | |
241 |
|
241 | |||
242 | def bind(func, *args): |
|
242 | def bind(func, *args): | |
243 | '''Partial function application |
|
243 | '''Partial function application | |
244 |
|
244 | |||
245 | Returns a new function that is the partial application of args and kwargs |
|
245 | Returns a new function that is the partial application of args and kwargs | |
246 | to func. For example, |
|
246 | to func. For example, | |
247 |
|
247 | |||
248 | f(1, 2, bar=3) === bind(f, 1)(2, bar=3)''' |
|
248 | f(1, 2, bar=3) === bind(f, 1)(2, bar=3)''' | |
249 | assert callable(func) |
|
249 | assert callable(func) | |
250 | def closure(*a, **kw): |
|
250 | def closure(*a, **kw): | |
251 | return func(*(args + a), **kw) |
|
251 | return func(*(args + a), **kw) | |
252 | return closure |
|
252 | return closure | |
253 |
|
253 | |||
254 | def _updatewrapper(wrap, origfn, unboundwrapper): |
|
254 | def _updatewrapper(wrap, origfn, unboundwrapper): | |
255 | '''Copy and add some useful attributes to wrapper''' |
|
255 | '''Copy and add some useful attributes to wrapper''' | |
256 | wrap.__module__ = getattr(origfn, '__module__') |
|
256 | wrap.__module__ = getattr(origfn, '__module__') | |
257 | wrap.__doc__ = getattr(origfn, '__doc__') |
|
257 | wrap.__doc__ = getattr(origfn, '__doc__') | |
258 | wrap.__dict__.update(getattr(origfn, '__dict__', {})) |
|
258 | wrap.__dict__.update(getattr(origfn, '__dict__', {})) | |
259 | wrap._origfunc = origfn |
|
259 | wrap._origfunc = origfn | |
260 | wrap._unboundwrapper = unboundwrapper |
|
260 | wrap._unboundwrapper = unboundwrapper | |
261 |
|
261 | |||
262 | def wrapcommand(table, command, wrapper, synopsis=None, docstring=None): |
|
262 | def wrapcommand(table, command, wrapper, synopsis=None, docstring=None): | |
263 | '''Wrap the command named `command' in table |
|
263 | '''Wrap the command named `command' in table | |
264 |
|
264 | |||
265 | Replace command in the command table with wrapper. The wrapped command will |
|
265 | Replace command in the command table with wrapper. The wrapped command will | |
266 | be inserted into the command table specified by the table argument. |
|
266 | be inserted into the command table specified by the table argument. | |
267 |
|
267 | |||
268 | The wrapper will be called like |
|
268 | The wrapper will be called like | |
269 |
|
269 | |||
270 | wrapper(orig, *args, **kwargs) |
|
270 | wrapper(orig, *args, **kwargs) | |
271 |
|
271 | |||
272 | where orig is the original (wrapped) function, and *args, **kwargs |
|
272 | where orig is the original (wrapped) function, and *args, **kwargs | |
273 | are the arguments passed to it. |
|
273 | are the arguments passed to it. | |
274 |
|
274 | |||
275 | Optionally append to the command synopsis and docstring, used for help. |
|
275 | Optionally append to the command synopsis and docstring, used for help. | |
276 | For example, if your extension wraps the ``bookmarks`` command to add the |
|
276 | For example, if your extension wraps the ``bookmarks`` command to add the | |
277 | flags ``--remote`` and ``--all`` you might call this function like so: |
|
277 | flags ``--remote`` and ``--all`` you might call this function like so: | |
278 |
|
278 | |||
279 | synopsis = ' [-a] [--remote]' |
|
279 | synopsis = ' [-a] [--remote]' | |
280 | docstring = """ |
|
280 | docstring = """ | |
281 |
|
281 | |||
282 | The ``remotenames`` extension adds the ``--remote`` and ``--all`` (``-a``) |
|
282 | The ``remotenames`` extension adds the ``--remote`` and ``--all`` (``-a``) | |
283 | flags to the bookmarks command. Either flag will show the remote bookmarks |
|
283 | flags to the bookmarks command. Either flag will show the remote bookmarks | |
284 | known to the repository; ``--remote`` will also suppress the output of the |
|
284 | known to the repository; ``--remote`` will also suppress the output of the | |
285 | local bookmarks. |
|
285 | local bookmarks. | |
286 | """ |
|
286 | """ | |
287 |
|
287 | |||
288 | extensions.wrapcommand(commands.table, 'bookmarks', exbookmarks, |
|
288 | extensions.wrapcommand(commands.table, 'bookmarks', exbookmarks, | |
289 | synopsis, docstring) |
|
289 | synopsis, docstring) | |
290 | ''' |
|
290 | ''' | |
291 | assert callable(wrapper) |
|
291 | assert callable(wrapper) | |
292 | aliases, entry = cmdutil.findcmd(command, table) |
|
292 | aliases, entry = cmdutil.findcmd(command, table) | |
293 | for alias, e in table.iteritems(): |
|
293 | for alias, e in table.iteritems(): | |
294 | if e is entry: |
|
294 | if e is entry: | |
295 | key = alias |
|
295 | key = alias | |
296 | break |
|
296 | break | |
297 |
|
297 | |||
298 | origfn = entry[0] |
|
298 | origfn = entry[0] | |
299 | wrap = bind(util.checksignature(wrapper), util.checksignature(origfn)) |
|
299 | wrap = bind(util.checksignature(wrapper), util.checksignature(origfn)) | |
300 | _updatewrapper(wrap, origfn, wrapper) |
|
300 | _updatewrapper(wrap, origfn, wrapper) | |
301 | if docstring is not None: |
|
301 | if docstring is not None: | |
302 | wrap.__doc__ += docstring |
|
302 | wrap.__doc__ += docstring | |
303 |
|
303 | |||
304 | newentry = list(entry) |
|
304 | newentry = list(entry) | |
305 | newentry[0] = wrap |
|
305 | newentry[0] = wrap | |
306 | if synopsis is not None: |
|
306 | if synopsis is not None: | |
307 | newentry[2] += synopsis |
|
307 | newentry[2] += synopsis | |
308 | table[key] = tuple(newentry) |
|
308 | table[key] = tuple(newentry) | |
309 | return entry |
|
309 | return entry | |
310 |
|
310 | |||
|
311 | def wrapfilecache(cls, propname, wrapper): | |||
|
312 | """Wraps a filecache property. | |||
|
313 | ||||
|
314 | These can't be wrapped using the normal wrapfunction. | |||
|
315 | """ | |||
|
316 | assert callable(wrapper) | |||
|
317 | for currcls in cls.__mro__: | |||
|
318 | if propname in currcls.__dict__: | |||
|
319 | origfn = currcls.__dict__[propname].func | |||
|
320 | assert callable(origfn) | |||
|
321 | def wrap(*args, **kwargs): | |||
|
322 | return wrapper(origfn, *args, **kwargs) | |||
|
323 | currcls.__dict__[propname].func = wrap | |||
|
324 | break | |||
|
325 | ||||
|
326 | if currcls is object: | |||
|
327 | raise AttributeError( | |||
|
328 | _("type '%s' has no property '%s'") % (cls, propname)) | |||
|
329 | ||||
311 | def wrapfunction(container, funcname, wrapper): |
|
330 | def wrapfunction(container, funcname, wrapper): | |
312 | '''Wrap the function named funcname in container |
|
331 | '''Wrap the function named funcname in container | |
313 |
|
332 | |||
314 | Replace the funcname member in the given container with the specified |
|
333 | Replace the funcname member in the given container with the specified | |
315 | wrapper. The container is typically a module, class, or instance. |
|
334 | wrapper. The container is typically a module, class, or instance. | |
316 |
|
335 | |||
317 | The wrapper will be called like |
|
336 | The wrapper will be called like | |
318 |
|
337 | |||
319 | wrapper(orig, *args, **kwargs) |
|
338 | wrapper(orig, *args, **kwargs) | |
320 |
|
339 | |||
321 | where orig is the original (wrapped) function, and *args, **kwargs |
|
340 | where orig is the original (wrapped) function, and *args, **kwargs | |
322 | are the arguments passed to it. |
|
341 | are the arguments passed to it. | |
323 |
|
342 | |||
324 | Wrapping methods of the repository object is not recommended since |
|
343 | Wrapping methods of the repository object is not recommended since | |
325 | it conflicts with extensions that extend the repository by |
|
344 | it conflicts with extensions that extend the repository by | |
326 | subclassing. All extensions that need to extend methods of |
|
345 | subclassing. All extensions that need to extend methods of | |
327 | localrepository should use this subclassing trick: namely, |
|
346 | localrepository should use this subclassing trick: namely, | |
328 | reposetup() should look like |
|
347 | reposetup() should look like | |
329 |
|
348 | |||
330 | def reposetup(ui, repo): |
|
349 | def reposetup(ui, repo): | |
331 | class myrepo(repo.__class__): |
|
350 | class myrepo(repo.__class__): | |
332 | def whatever(self, *args, **kwargs): |
|
351 | def whatever(self, *args, **kwargs): | |
333 | [...extension stuff...] |
|
352 | [...extension stuff...] | |
334 | super(myrepo, self).whatever(*args, **kwargs) |
|
353 | super(myrepo, self).whatever(*args, **kwargs) | |
335 | [...extension stuff...] |
|
354 | [...extension stuff...] | |
336 |
|
355 | |||
337 | repo.__class__ = myrepo |
|
356 | repo.__class__ = myrepo | |
338 |
|
357 | |||
339 | In general, combining wrapfunction() with subclassing does not |
|
358 | In general, combining wrapfunction() with subclassing does not | |
340 | work. Since you cannot control what other extensions are loaded by |
|
359 | work. Since you cannot control what other extensions are loaded by | |
341 | your end users, you should play nicely with others by using the |
|
360 | your end users, you should play nicely with others by using the | |
342 | subclass trick. |
|
361 | subclass trick. | |
343 | ''' |
|
362 | ''' | |
344 | assert callable(wrapper) |
|
363 | assert callable(wrapper) | |
345 |
|
364 | |||
346 | origfn = getattr(container, funcname) |
|
365 | origfn = getattr(container, funcname) | |
347 | assert callable(origfn) |
|
366 | assert callable(origfn) | |
348 | wrap = bind(wrapper, origfn) |
|
367 | wrap = bind(wrapper, origfn) | |
349 | _updatewrapper(wrap, origfn, wrapper) |
|
368 | _updatewrapper(wrap, origfn, wrapper) | |
350 | setattr(container, funcname, wrap) |
|
369 | setattr(container, funcname, wrap) | |
351 | return origfn |
|
370 | return origfn | |
352 |
|
371 | |||
353 | def unwrapfunction(container, funcname, wrapper=None): |
|
372 | def unwrapfunction(container, funcname, wrapper=None): | |
354 | '''undo wrapfunction |
|
373 | '''undo wrapfunction | |
355 |
|
374 | |||
356 | If wrappers is None, undo the last wrap. Otherwise removes the wrapper |
|
375 | If wrappers is None, undo the last wrap. Otherwise removes the wrapper | |
357 | from the chain of wrappers. |
|
376 | from the chain of wrappers. | |
358 |
|
377 | |||
359 | Return the removed wrapper. |
|
378 | Return the removed wrapper. | |
360 | Raise IndexError if wrapper is None and nothing to unwrap; ValueError if |
|
379 | Raise IndexError if wrapper is None and nothing to unwrap; ValueError if | |
361 | wrapper is not None but is not found in the wrapper chain. |
|
380 | wrapper is not None but is not found in the wrapper chain. | |
362 | ''' |
|
381 | ''' | |
363 | chain = getwrapperchain(container, funcname) |
|
382 | chain = getwrapperchain(container, funcname) | |
364 | origfn = chain.pop() |
|
383 | origfn = chain.pop() | |
365 | if wrapper is None: |
|
384 | if wrapper is None: | |
366 | wrapper = chain[0] |
|
385 | wrapper = chain[0] | |
367 | chain.remove(wrapper) |
|
386 | chain.remove(wrapper) | |
368 | setattr(container, funcname, origfn) |
|
387 | setattr(container, funcname, origfn) | |
369 | for w in reversed(chain): |
|
388 | for w in reversed(chain): | |
370 | wrapfunction(container, funcname, w) |
|
389 | wrapfunction(container, funcname, w) | |
371 | return wrapper |
|
390 | return wrapper | |
372 |
|
391 | |||
373 | def getwrapperchain(container, funcname): |
|
392 | def getwrapperchain(container, funcname): | |
374 | '''get a chain of wrappers of a function |
|
393 | '''get a chain of wrappers of a function | |
375 |
|
394 | |||
376 | Return a list of functions: [newest wrapper, ..., oldest wrapper, origfunc] |
|
395 | Return a list of functions: [newest wrapper, ..., oldest wrapper, origfunc] | |
377 |
|
396 | |||
378 | The wrapper functions are the ones passed to wrapfunction, whose first |
|
397 | The wrapper functions are the ones passed to wrapfunction, whose first | |
379 | argument is origfunc. |
|
398 | argument is origfunc. | |
380 | ''' |
|
399 | ''' | |
381 | result = [] |
|
400 | result = [] | |
382 | fn = getattr(container, funcname) |
|
401 | fn = getattr(container, funcname) | |
383 | while fn: |
|
402 | while fn: | |
384 | assert callable(fn) |
|
403 | assert callable(fn) | |
385 | result.append(getattr(fn, '_unboundwrapper', fn)) |
|
404 | result.append(getattr(fn, '_unboundwrapper', fn)) | |
386 | fn = getattr(fn, '_origfunc', None) |
|
405 | fn = getattr(fn, '_origfunc', None) | |
387 | return result |
|
406 | return result | |
388 |
|
407 | |||
389 | def _disabledpaths(strip_init=False): |
|
408 | def _disabledpaths(strip_init=False): | |
390 | '''find paths of disabled extensions. returns a dict of {name: path} |
|
409 | '''find paths of disabled extensions. returns a dict of {name: path} | |
391 | removes /__init__.py from packages if strip_init is True''' |
|
410 | removes /__init__.py from packages if strip_init is True''' | |
392 | import hgext |
|
411 | import hgext | |
393 | extpath = os.path.dirname( |
|
412 | extpath = os.path.dirname( | |
394 | os.path.abspath(pycompat.fsencode(hgext.__file__))) |
|
413 | os.path.abspath(pycompat.fsencode(hgext.__file__))) | |
395 | try: # might not be a filesystem path |
|
414 | try: # might not be a filesystem path | |
396 | files = os.listdir(extpath) |
|
415 | files = os.listdir(extpath) | |
397 | except OSError: |
|
416 | except OSError: | |
398 | return {} |
|
417 | return {} | |
399 |
|
418 | |||
400 | exts = {} |
|
419 | exts = {} | |
401 | for e in files: |
|
420 | for e in files: | |
402 | if e.endswith('.py'): |
|
421 | if e.endswith('.py'): | |
403 | name = e.rsplit('.', 1)[0] |
|
422 | name = e.rsplit('.', 1)[0] | |
404 | path = os.path.join(extpath, e) |
|
423 | path = os.path.join(extpath, e) | |
405 | else: |
|
424 | else: | |
406 | name = e |
|
425 | name = e | |
407 | path = os.path.join(extpath, e, '__init__.py') |
|
426 | path = os.path.join(extpath, e, '__init__.py') | |
408 | if not os.path.exists(path): |
|
427 | if not os.path.exists(path): | |
409 | continue |
|
428 | continue | |
410 | if strip_init: |
|
429 | if strip_init: | |
411 | path = os.path.dirname(path) |
|
430 | path = os.path.dirname(path) | |
412 | if name in exts or name in _order or name == '__init__': |
|
431 | if name in exts or name in _order or name == '__init__': | |
413 | continue |
|
432 | continue | |
414 | exts[name] = path |
|
433 | exts[name] = path | |
415 | exts.update(_disabledextensions) |
|
434 | exts.update(_disabledextensions) | |
416 | return exts |
|
435 | return exts | |
417 |
|
436 | |||
418 | def _moduledoc(file): |
|
437 | def _moduledoc(file): | |
419 | '''return the top-level python documentation for the given file |
|
438 | '''return the top-level python documentation for the given file | |
420 |
|
439 | |||
421 | Loosely inspired by pydoc.source_synopsis(), but rewritten to |
|
440 | Loosely inspired by pydoc.source_synopsis(), but rewritten to | |
422 | handle triple quotes and to return the whole text instead of just |
|
441 | handle triple quotes and to return the whole text instead of just | |
423 | the synopsis''' |
|
442 | the synopsis''' | |
424 | result = [] |
|
443 | result = [] | |
425 |
|
444 | |||
426 | line = file.readline() |
|
445 | line = file.readline() | |
427 | while line[:1] == '#' or not line.strip(): |
|
446 | while line[:1] == '#' or not line.strip(): | |
428 | line = file.readline() |
|
447 | line = file.readline() | |
429 | if not line: |
|
448 | if not line: | |
430 | break |
|
449 | break | |
431 |
|
450 | |||
432 | start = line[:3] |
|
451 | start = line[:3] | |
433 | if start == '"""' or start == "'''": |
|
452 | if start == '"""' or start == "'''": | |
434 | line = line[3:] |
|
453 | line = line[3:] | |
435 | while line: |
|
454 | while line: | |
436 | if line.rstrip().endswith(start): |
|
455 | if line.rstrip().endswith(start): | |
437 | line = line.split(start)[0] |
|
456 | line = line.split(start)[0] | |
438 | if line: |
|
457 | if line: | |
439 | result.append(line) |
|
458 | result.append(line) | |
440 | break |
|
459 | break | |
441 | elif not line: |
|
460 | elif not line: | |
442 | return None # unmatched delimiter |
|
461 | return None # unmatched delimiter | |
443 | result.append(line) |
|
462 | result.append(line) | |
444 | line = file.readline() |
|
463 | line = file.readline() | |
445 | else: |
|
464 | else: | |
446 | return None |
|
465 | return None | |
447 |
|
466 | |||
448 | return ''.join(result) |
|
467 | return ''.join(result) | |
449 |
|
468 | |||
450 | def _disabledhelp(path): |
|
469 | def _disabledhelp(path): | |
451 | '''retrieve help synopsis of a disabled extension (without importing)''' |
|
470 | '''retrieve help synopsis of a disabled extension (without importing)''' | |
452 | try: |
|
471 | try: | |
453 | file = open(path) |
|
472 | file = open(path) | |
454 | except IOError: |
|
473 | except IOError: | |
455 | return |
|
474 | return | |
456 | else: |
|
475 | else: | |
457 | doc = _moduledoc(file) |
|
476 | doc = _moduledoc(file) | |
458 | file.close() |
|
477 | file.close() | |
459 |
|
478 | |||
460 | if doc: # extracting localized synopsis |
|
479 | if doc: # extracting localized synopsis | |
461 | return gettext(doc) |
|
480 | return gettext(doc) | |
462 | else: |
|
481 | else: | |
463 | return _('(no help text available)') |
|
482 | return _('(no help text available)') | |
464 |
|
483 | |||
465 | def disabled(): |
|
484 | def disabled(): | |
466 | '''find disabled extensions from hgext. returns a dict of {name: desc}''' |
|
485 | '''find disabled extensions from hgext. returns a dict of {name: desc}''' | |
467 | try: |
|
486 | try: | |
468 | from hgext import __index__ |
|
487 | from hgext import __index__ | |
469 | return dict((name, gettext(desc)) |
|
488 | return dict((name, gettext(desc)) | |
470 | for name, desc in __index__.docs.iteritems() |
|
489 | for name, desc in __index__.docs.iteritems() | |
471 | if name not in _order) |
|
490 | if name not in _order) | |
472 | except (ImportError, AttributeError): |
|
491 | except (ImportError, AttributeError): | |
473 | pass |
|
492 | pass | |
474 |
|
493 | |||
475 | paths = _disabledpaths() |
|
494 | paths = _disabledpaths() | |
476 | if not paths: |
|
495 | if not paths: | |
477 | return {} |
|
496 | return {} | |
478 |
|
497 | |||
479 | exts = {} |
|
498 | exts = {} | |
480 | for name, path in paths.iteritems(): |
|
499 | for name, path in paths.iteritems(): | |
481 | doc = _disabledhelp(path) |
|
500 | doc = _disabledhelp(path) | |
482 | if doc: |
|
501 | if doc: | |
483 | exts[name] = doc.splitlines()[0] |
|
502 | exts[name] = doc.splitlines()[0] | |
484 |
|
503 | |||
485 | return exts |
|
504 | return exts | |
486 |
|
505 | |||
487 | def disabledext(name): |
|
506 | def disabledext(name): | |
488 | '''find a specific disabled extension from hgext. returns desc''' |
|
507 | '''find a specific disabled extension from hgext. returns desc''' | |
489 | try: |
|
508 | try: | |
490 | from hgext import __index__ |
|
509 | from hgext import __index__ | |
491 | if name in _order: # enabled |
|
510 | if name in _order: # enabled | |
492 | return |
|
511 | return | |
493 | else: |
|
512 | else: | |
494 | return gettext(__index__.docs.get(name)) |
|
513 | return gettext(__index__.docs.get(name)) | |
495 | except (ImportError, AttributeError): |
|
514 | except (ImportError, AttributeError): | |
496 | pass |
|
515 | pass | |
497 |
|
516 | |||
498 | paths = _disabledpaths() |
|
517 | paths = _disabledpaths() | |
499 | if name in paths: |
|
518 | if name in paths: | |
500 | return _disabledhelp(paths[name]) |
|
519 | return _disabledhelp(paths[name]) | |
501 |
|
520 | |||
502 | def disabledcmd(ui, cmd, strict=False): |
|
521 | def disabledcmd(ui, cmd, strict=False): | |
503 | '''import disabled extensions until cmd is found. |
|
522 | '''import disabled extensions until cmd is found. | |
504 | returns (cmdname, extname, module)''' |
|
523 | returns (cmdname, extname, module)''' | |
505 |
|
524 | |||
506 | paths = _disabledpaths(strip_init=True) |
|
525 | paths = _disabledpaths(strip_init=True) | |
507 | if not paths: |
|
526 | if not paths: | |
508 | raise error.UnknownCommand(cmd) |
|
527 | raise error.UnknownCommand(cmd) | |
509 |
|
528 | |||
510 | def findcmd(cmd, name, path): |
|
529 | def findcmd(cmd, name, path): | |
511 | try: |
|
530 | try: | |
512 | mod = loadpath(path, 'hgext.%s' % name) |
|
531 | mod = loadpath(path, 'hgext.%s' % name) | |
513 | except Exception: |
|
532 | except Exception: | |
514 | return |
|
533 | return | |
515 | try: |
|
534 | try: | |
516 | aliases, entry = cmdutil.findcmd(cmd, |
|
535 | aliases, entry = cmdutil.findcmd(cmd, | |
517 | getattr(mod, 'cmdtable', {}), strict) |
|
536 | getattr(mod, 'cmdtable', {}), strict) | |
518 | except (error.AmbiguousCommand, error.UnknownCommand): |
|
537 | except (error.AmbiguousCommand, error.UnknownCommand): | |
519 | return |
|
538 | return | |
520 | except Exception: |
|
539 | except Exception: | |
521 | ui.warn(_('warning: error finding commands in %s\n') % path) |
|
540 | ui.warn(_('warning: error finding commands in %s\n') % path) | |
522 | ui.traceback() |
|
541 | ui.traceback() | |
523 | return |
|
542 | return | |
524 | for c in aliases: |
|
543 | for c in aliases: | |
525 | if c.startswith(cmd): |
|
544 | if c.startswith(cmd): | |
526 | cmd = c |
|
545 | cmd = c | |
527 | break |
|
546 | break | |
528 | else: |
|
547 | else: | |
529 | cmd = aliases[0] |
|
548 | cmd = aliases[0] | |
530 | return (cmd, name, mod) |
|
549 | return (cmd, name, mod) | |
531 |
|
550 | |||
532 | ext = None |
|
551 | ext = None | |
533 | # first, search for an extension with the same name as the command |
|
552 | # first, search for an extension with the same name as the command | |
534 | path = paths.pop(cmd, None) |
|
553 | path = paths.pop(cmd, None) | |
535 | if path: |
|
554 | if path: | |
536 | ext = findcmd(cmd, cmd, path) |
|
555 | ext = findcmd(cmd, cmd, path) | |
537 | if not ext: |
|
556 | if not ext: | |
538 | # otherwise, interrogate each extension until there's a match |
|
557 | # otherwise, interrogate each extension until there's a match | |
539 | for name, path in paths.iteritems(): |
|
558 | for name, path in paths.iteritems(): | |
540 | ext = findcmd(cmd, name, path) |
|
559 | ext = findcmd(cmd, name, path) | |
541 | if ext: |
|
560 | if ext: | |
542 | break |
|
561 | break | |
543 | if ext and 'DEPRECATED' not in ext.__doc__: |
|
562 | if ext and 'DEPRECATED' not in ext.__doc__: | |
544 | return ext |
|
563 | return ext | |
545 |
|
564 | |||
546 | raise error.UnknownCommand(cmd) |
|
565 | raise error.UnknownCommand(cmd) | |
547 |
|
566 | |||
548 | def enabled(shortname=True): |
|
567 | def enabled(shortname=True): | |
549 | '''return a dict of {name: desc} of extensions''' |
|
568 | '''return a dict of {name: desc} of extensions''' | |
550 | exts = {} |
|
569 | exts = {} | |
551 | for ename, ext in extensions(): |
|
570 | for ename, ext in extensions(): | |
552 | doc = (gettext(ext.__doc__) or _('(no help text available)')) |
|
571 | doc = (gettext(ext.__doc__) or _('(no help text available)')) | |
553 | if shortname: |
|
572 | if shortname: | |
554 | ename = ename.split('.')[-1] |
|
573 | ename = ename.split('.')[-1] | |
555 | exts[ename] = doc.splitlines()[0].strip() |
|
574 | exts[ename] = doc.splitlines()[0].strip() | |
556 |
|
575 | |||
557 | return exts |
|
576 | return exts | |
558 |
|
577 | |||
559 | def notloaded(): |
|
578 | def notloaded(): | |
560 | '''return short names of extensions that failed to load''' |
|
579 | '''return short names of extensions that failed to load''' | |
561 | return [name for name, mod in _extensions.iteritems() if mod is None] |
|
580 | return [name for name, mod in _extensions.iteritems() if mod is None] | |
562 |
|
581 | |||
563 | def moduleversion(module): |
|
582 | def moduleversion(module): | |
564 | '''return version information from given module as a string''' |
|
583 | '''return version information from given module as a string''' | |
565 | if (util.safehasattr(module, 'getversion') |
|
584 | if (util.safehasattr(module, 'getversion') | |
566 | and callable(module.getversion)): |
|
585 | and callable(module.getversion)): | |
567 | version = module.getversion() |
|
586 | version = module.getversion() | |
568 | elif util.safehasattr(module, '__version__'): |
|
587 | elif util.safehasattr(module, '__version__'): | |
569 | version = module.__version__ |
|
588 | version = module.__version__ | |
570 | else: |
|
589 | else: | |
571 | version = '' |
|
590 | version = '' | |
572 | if isinstance(version, (list, tuple)): |
|
591 | if isinstance(version, (list, tuple)): | |
573 | version = '.'.join(str(o) for o in version) |
|
592 | version = '.'.join(str(o) for o in version) | |
574 | return version |
|
593 | return version | |
575 |
|
594 | |||
576 | def ismoduleinternal(module): |
|
595 | def ismoduleinternal(module): | |
577 | exttestedwith = getattr(module, 'testedwith', None) |
|
596 | exttestedwith = getattr(module, 'testedwith', None) | |
578 | return exttestedwith == "ships-with-hg-core" |
|
597 | return exttestedwith == "ships-with-hg-core" |
General Comments 0
You need to be logged in to leave comments.
Login now