@@ -1,720 +1,716 @@
# __init__.py - fsmonitor initialization and overrides
#
# Copyright 2013-2016 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''Faster status operations with the Watchman file monitor (EXPERIMENTAL)

Integrates the file-watching program Watchman with Mercurial to produce faster
status results.

On a particular Linux system, for a real-world repository with over 400,000
files hosted on ext4, vanilla `hg status` takes 1.3 seconds. On the same
system, with fsmonitor it takes about 0.3 seconds.

fsmonitor requires no configuration -- it will tell Watchman about your
repository as necessary. You'll need to install Watchman from
https://facebook.github.io/watchman/ and make sure it is in your PATH.

The following configuration options exist:

::

    [fsmonitor]
    mode = {off, on, paranoid}

When `mode = off`, fsmonitor will disable itself (similar to not loading the
extension at all). When `mode = on`, fsmonitor will be enabled (the default).
When `mode = paranoid`, fsmonitor will query both Watchman and the filesystem,
and ensure that the results are consistent.

::

    [fsmonitor]
    timeout = (float)

A value, in seconds, that determines how long fsmonitor will wait for Watchman
to return results. Defaults to `2.0`.

::

    [fsmonitor]
    blacklistusers = (list of userids)

A list of usernames for which fsmonitor will disable itself altogether.

::

    [fsmonitor]
    walk_on_invalidate = (boolean)

Whether or not to walk the whole repo ourselves when our cached state has been
invalidated, for example when Watchman has been restarted or .hgignore rules
have been changed. Walking the repo in that case can result in competing for
I/O with Watchman. For large repos it is recommended to set this value to
false. You may wish to set this to true if you have a very fast filesystem
that can outpace the IPC overhead of getting the result data for the full repo
from Watchman. Defaults to false.

fsmonitor is incompatible with the largefiles and eol extensions, and
will disable itself if any of those are active.

'''

# Platforms Supported
# ===================
#
# **Linux:** *Stable*. Watchman and fsmonitor are both known to work reliably,
#   even under severe loads.
#
# **Mac OS X:** *Stable*. The Mercurial test suite passes with fsmonitor
#   turned on, on case-insensitive HFS+. There has been a reasonable amount of
#   user testing under normal loads.
#
# **Solaris, BSD:** *Alpha*. watchman and fsmonitor are believed to work, but
#   very little testing has been done.
#
# **Windows:** *Alpha*. Not in a release version of watchman or fsmonitor yet.
#
# Known Issues
# ============
#
# * fsmonitor will disable itself if any of the following extensions are
#   enabled: largefiles, inotify, eol; or if the repository has subrepos.
# * fsmonitor will produce incorrect results if nested repos that are not
#   subrepos exist. *Workaround*: add nested repo paths to your `.hgignore`.
#
# The issues related to nested repos and subrepos are probably not fundamental
# ones. Patches to fix them are welcome.

from __future__ import absolute_import

import codecs
import hashlib
import os
import stat
import sys

from mercurial.i18n import _
from mercurial import (
    context,
    encoding,
    error,
    extensions,
    localrepo,
    merge,
    pathutil,
    pycompat,
    scmutil,
    util,
)
from mercurial import match as matchmod

from . import (
    pywatchman,
    state,
    watchmanclient,
)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

# This extension is incompatible with the following blacklisted extensions
# and will disable itself when encountering one of these:
_blacklist = ['largefiles', 'eol']

def _handleunavailable(ui, state, ex):
    """Exception handler for Watchman interaction exceptions"""
    if isinstance(ex, watchmanclient.Unavailable):
        if ex.warn:
            ui.warn(str(ex) + '\n')
        if ex.invalidate:
            state.invalidate()
        ui.log('fsmonitor', 'Watchman unavailable: %s\n', ex.msg)
    else:
        ui.log('fsmonitor', 'Watchman exception: %s\n', ex)

def _hashignore(ignore):
    """Calculate hash for ignore patterns and filenames

    If this information changes between Mercurial invocations, we can't
    rely on Watchman information anymore and have to re-scan the working
    copy.

    """
    sha1 = hashlib.sha1()
    sha1.update(repr(ignore))
    return sha1.hexdigest()

_watchmanencoding = pywatchman.encoding.get_local_encoding()
_fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
_fixencoding = codecs.lookup(_watchmanencoding) != codecs.lookup(_fsencoding)

def _watchmantofsencoding(path):
    """Fix path to match watchman and local filesystem encoding

    watchman's paths encoding can differ from filesystem encoding. For example,
    on Windows, it's always utf-8.
    """
    try:
        decoded = path.decode(_watchmanencoding)
    except UnicodeDecodeError as e:
        raise error.Abort(str(e), hint='watchman encoding error')

    try:
        encoded = decoded.encode(_fsencoding, 'strict')
    except UnicodeEncodeError as e:
        raise error.Abort(str(e))

    return encoded

def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True):
    '''Replacement for dirstate.walk, hooking into Watchman.

    Whenever full is False, ignored is False, and the Watchman client is
    available, use Watchman combined with saved state to possibly return only a
    subset of files.'''
    def bail():
        return orig(match, subrepos, unknown, ignored, full=True)

    if full or ignored or not self._watchmanclient.available():
        return bail()
    state = self._fsmonitorstate
    clock, ignorehash, notefiles = state.get()
    if not clock:
        if state.walk_on_invalidate:
            return bail()
        # Initial NULL clock value, see
        # https://facebook.github.io/watchman/docs/clockspec.html
        clock = 'c:0:0'
        notefiles = []

    def fwarn(f, msg):
        self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
        return False

    def badtype(mode):
        kind = _('unknown')
        if stat.S_ISCHR(mode):
            kind = _('character device')
        elif stat.S_ISBLK(mode):
            kind = _('block device')
        elif stat.S_ISFIFO(mode):
            kind = _('fifo')
        elif stat.S_ISSOCK(mode):
            kind = _('socket')
        elif stat.S_ISDIR(mode):
            kind = _('directory')
        return _('unsupported file type (type is %s)') % kind

    ignore = self._ignore
    dirignore = self._dirignore
    if unknown:
        if _hashignore(ignore) != ignorehash and clock != 'c:0:0':
            # ignore list changed -- can't rely on Watchman state any more
            if state.walk_on_invalidate:
                return bail()
            notefiles = []
            clock = 'c:0:0'
    else:
        # always ignore
        ignore = util.always
        dirignore = util.always

    matchfn = match.matchfn
    matchalways = match.always()
    dmap = self._map
    nonnormalset = getattr(self, '_nonnormalset', None)

    copymap = self._copymap
    getkind = stat.S_IFMT
    dirkind = stat.S_IFDIR
    regkind = stat.S_IFREG
    lnkkind = stat.S_IFLNK
    join = self._join
    normcase = util.normcase
    fresh_instance = False

    exact = skipstep3 = False
    if match.isexact():  # match.exact
        exact = True
        dirignore = util.always  # skip step 2
    elif match.prefix():  # match.match, no patterns
        skipstep3 = True

    if not exact and self._checkcase:
        # note that even though we could receive directory entries, we're only
        # interested in checking if a file with the same name exists. So only
        # normalize files if possible.
        normalize = self._normalizefile
        skipstep3 = False
    else:
        normalize = None

    # step 1: find all explicit files
    results, work, dirsnotfound = self._walkexplicit(match, subrepos)

    skipstep3 = skipstep3 and not (work or dirsnotfound)
    work = [d for d in work if not dirignore(d[0])]

    if not work and (exact or skipstep3):
        for s in subrepos:
            del results[s]
        del results['.hg']
        return results

    # step 2: query Watchman
    try:
        # Use the user-configured timeout for the query.
        # Add a little slack over the top of the user query to allow for
        # overheads while transferring the data
        self._watchmanclient.settimeout(state.timeout + 0.1)
        result = self._watchmanclient.command('query', {
            'fields': ['mode', 'mtime', 'size', 'exists', 'name'],
            'since': clock,
            'expression': [
                'not', [
                    'anyof', ['dirname', '.hg'],
                    ['name', '.hg', 'wholename']
                ]
            ],
            'sync_timeout': int(state.timeout * 1000),
            'empty_on_fresh_instance': state.walk_on_invalidate,
        })
    except Exception as ex:
        _handleunavailable(self._ui, state, ex)
        self._watchmanclient.clearconnection()
        return bail()
    else:
        # We need to propagate the last observed clock up so that we
        # can use it for our next query
        state.setlastclock(result['clock'])
        if result['is_fresh_instance']:
            if state.walk_on_invalidate:
                state.invalidate()
                return bail()
            fresh_instance = True
            # Ignore any prior notable files from the state info
            notefiles = []

    # for file paths which require normalization and we encounter a case
    # collision, we store our own foldmap
    if normalize:
        foldmap = dict((normcase(k), k) for k in results)

    switch_slashes = pycompat.ossep == '\\'
    # The order of the results is, strictly speaking, undefined.
    # For case changes on a case insensitive filesystem we may receive
    # two entries, one with exists=True and another with exists=False.
    # The exists=True entries in the same response should be interpreted
    # as being happens-after the exists=False entries due to the way that
    # Watchman tracks files. We use this property to reconcile deletes
    # for name case changes.
    for entry in result['files']:
        fname = entry['name']
        if _fixencoding:
            fname = _watchmantofsencoding(fname)
        if switch_slashes:
            fname = fname.replace('\\', '/')
        if normalize:
            normed = normcase(fname)
            fname = normalize(fname, True, True)
            foldmap[normed] = fname
        fmode = entry['mode']
        fexists = entry['exists']
        kind = getkind(fmode)

        if not fexists:
            # if marked as deleted and we don't already have a change
            # record, mark it as deleted. If we already have an entry
            # for fname then it was either part of walkexplicit or was
            # an earlier result that was a case change
            if fname not in results and fname in dmap and (
                    matchalways or matchfn(fname)):
                results[fname] = None
        elif kind == dirkind:
            if fname in dmap and (matchalways or matchfn(fname)):
                results[fname] = None
        elif kind == regkind or kind == lnkkind:
            if fname in dmap:
                if matchalways or matchfn(fname):
                    results[fname] = entry
            elif (matchalways or matchfn(fname)) and not ignore(fname):
                results[fname] = entry
        elif fname in dmap and (matchalways or matchfn(fname)):
            results[fname] = None

    # step 3: query notable files we don't already know about
    # XXX try not to iterate over the entire dmap
    if normalize:
        # any notable files that have changed case will already be handled
        # above, so just check membership in the foldmap
        notefiles = set((normalize(f, True, True) for f in notefiles
                         if normcase(f) not in foldmap))
    visit = set((f for f in notefiles if (f not in results and matchfn(f)
                                          and (f in dmap or not ignore(f)))))

    if nonnormalset is not None and not fresh_instance:
        if matchalways:
            visit.update(f for f in nonnormalset if f not in results)
            visit.update(f for f in copymap if f not in results)
        else:
            visit.update(f for f in nonnormalset
                         if f not in results and matchfn(f))
            visit.update(f for f in copymap
                         if f not in results and matchfn(f))
    else:
        if matchalways:
            visit.update(f for f, st in dmap.iteritems()
                         if (f not in results and
                             (st[2] < 0 or st[0] != 'n' or fresh_instance)))
            visit.update(f for f in copymap if f not in results)
        else:
            visit.update(f for f, st in dmap.iteritems()
                         if (f not in results and
                             (st[2] < 0 or st[0] != 'n' or fresh_instance)
                             and matchfn(f)))
            visit.update(f for f in copymap
                         if f not in results and matchfn(f))

    audit = pathutil.pathauditor(self._root).check
    auditpass = [f for f in visit if audit(f)]
    auditpass.sort()
    auditfail = visit.difference(auditpass)
    for f in auditfail:
        results[f] = None

    nf = iter(auditpass).next
    for st in util.statfiles([join(f) for f in auditpass]):
        f = nf()
        if st or f in dmap:
            results[f] = st

    for s in subrepos:
        del results[s]
    del results['.hg']
    return results

def overridestatus(
        orig, self, node1='.', node2=None, match=None, ignored=False,
        clean=False, unknown=False, listsubrepos=False):
    listignored = ignored
    listclean = clean
    listunknown = unknown

    def _cmpsets(l1, l2):
        try:
            if 'FSMONITOR_LOG_FILE' in encoding.environ:
                fn = encoding.environ['FSMONITOR_LOG_FILE']
                f = open(fn, 'wb')
            else:
                fn = 'fsmonitorfail.log'
                f = self.opener(fn, 'wb')
        except (IOError, OSError):
            self.ui.warn(_('warning: unable to write to %s\n') % fn)
            return

        try:
            for i, (s1, s2) in enumerate(zip(l1, l2)):
                if set(s1) != set(s2):
                    f.write('sets at position %d are unequal\n' % i)
                    f.write('watchman returned: %s\n' % s1)
                    f.write('stat returned: %s\n' % s2)
        finally:
            f.close()

    if isinstance(node1, context.changectx):
        ctx1 = node1
    else:
        ctx1 = self[node1]
    if isinstance(node2, context.changectx):
        ctx2 = node2
    else:
        ctx2 = self[node2]

    working = ctx2.rev() is None
    parentworking = working and ctx1 == self['.']
    match = match or matchmod.always(self.root, self.getcwd())

    # Maybe we can use this opportunity to update Watchman's state.
    # Mercurial uses workingcommitctx and/or memctx to represent the part of
    # the workingctx that is to be committed. So don't update the state in
    # that case.
    # HG_PENDING is set in the environment when the dirstate is being updated
    # in the middle of a transaction; we must not update our state in that
    # case, or we risk forgetting about changes in the working copy.
    updatestate = (parentworking and match.always() and
                   not isinstance(ctx2, (context.workingcommitctx,
                                         context.memctx)) and
                   'HG_PENDING' not in encoding.environ)

    try:
        if self._fsmonitorstate.walk_on_invalidate:
            # Use a short timeout to query the current clock. If that
            # takes too long then we assume that the service will be slow
            # to answer our query.
            # walk_on_invalidate indicates that we prefer to walk the
            # tree ourselves because we can ignore portions that Watchman
            # cannot and we tend to be faster in the warmer buffer cache
            # cases.
            self._watchmanclient.settimeout(0.1)
        else:
            # Give Watchman more time to potentially complete its walk
            # and return the initial clock. In this mode we assume that
            # the filesystem will be slower than parsing a potentially
            # very large Watchman result set.
            self._watchmanclient.settimeout(
                self._fsmonitorstate.timeout + 0.1)
        startclock = self._watchmanclient.getcurrentclock()
    except Exception as ex:
        self._watchmanclient.clearconnection()
        _handleunavailable(self.ui, self._fsmonitorstate, ex)
        # boo, Watchman failed. bail
        return orig(node1, node2, match, listignored, listclean,
                    listunknown, listsubrepos)

    if updatestate:
        # We need info about unknown files. This may make things slower the
        # first time, but whatever.
        stateunknown = True
    else:
        stateunknown = listunknown

    if updatestate:
        ps = poststatus(startclock)
        self.addpostdsstatus(ps)

    r = orig(node1, node2, match, listignored, listclean, stateunknown,
             listsubrepos)
    modified, added, removed, deleted, unknown, ignored, clean = r

    if not listunknown:
        unknown = []

    # don't do paranoid checks if we're not going to query Watchman anyway
    full = listclean or match.traversedir is not None
    if self._fsmonitorstate.mode == 'paranoid' and not full:
        # run status again and fall back to the old walk this time
        self.dirstate._fsmonitordisable = True

        # shut the UI up
        quiet = self.ui.quiet
        self.ui.quiet = True
        fout, ferr = self.ui.fout, self.ui.ferr
        self.ui.fout = self.ui.ferr = open(os.devnull, 'wb')

        try:
            rv2 = orig(
                node1, node2, match, listignored, listclean, listunknown,
                listsubrepos)
        finally:
            self.dirstate._fsmonitordisable = False
            self.ui.quiet = quiet
            self.ui.fout, self.ui.ferr = fout, ferr

        # clean isn't tested since it's set to True above
        _cmpsets([modified, added, removed, deleted, unknown, ignored, clean],
                 rv2)
        modified, added, removed, deleted, unknown, ignored, clean = rv2

    return scmutil.status(
        modified, added, removed, deleted, unknown, ignored, clean)

class poststatus(object):
    def __init__(self, startclock):
        self._startclock = startclock

    def __call__(self, wctx, status):
        clock = wctx.repo()._fsmonitorstate.getlastclock() or self._startclock
        hashignore = _hashignore(wctx.repo().dirstate._ignore)
        notefiles = (status.modified + status.added + status.removed +
                     status.deleted + status.unknown)
        wctx.repo()._fsmonitorstate.set(clock, hashignore, notefiles)

def makedirstate(cls):
    class fsmonitordirstate(cls):
        def _fsmonitorinit(self, fsmonitorstate, watchmanclient):
            # _fsmonitordisable is used in paranoid mode
            self._fsmonitordisable = False
            self._fsmonitorstate = fsmonitorstate
            self._watchmanclient = watchmanclient

        def walk(self, *args, **kwargs):
            orig = super(fsmonitordirstate, self).walk
            if self._fsmonitordisable:
                return orig(*args, **kwargs)
            return overridewalk(orig, self, *args, **kwargs)

        def rebuild(self, *args, **kwargs):
            self._fsmonitorstate.invalidate()
            return super(fsmonitordirstate, self).rebuild(*args, **kwargs)

        def invalidate(self, *args, **kwargs):
            self._fsmonitorstate.invalidate()
            return super(fsmonitordirstate, self).invalidate(*args, **kwargs)

    return fsmonitordirstate

def wrapdirstate(orig, self):
    ds = orig(self)
    # only override the dirstate when Watchman is available for the repo
    if util.safehasattr(self, '_fsmonitorstate'):
        ds.__class__ = makedirstate(ds.__class__)
        ds._fsmonitorinit(self._fsmonitorstate, self._watchmanclient)
    return ds

def extsetup(ui):
    extensions.wrapfilecache(
        localrepo.localrepository, 'dirstate', wrapdirstate)
    if pycompat.sysplatform == 'darwin':
        # An assist for avoiding the dangling-symlink fsevents bug
        extensions.wrapfunction(os, 'symlink', wrapsymlink)

    extensions.wrapfunction(merge, 'update', wrapupdate)

def wrapsymlink(orig, source, link_name):
    ''' if we create a dangling symlink, also touch the parent dir
    to encourage fsevents notifications to work more correctly '''
    try:
        return orig(source, link_name)
    finally:
        try:
            os.utime(os.path.dirname(link_name), None)
        except OSError:
            pass

class state_update(object):
    ''' This context manager is responsible for dispatching the state-enter
        and state-leave signals to the watchman service '''

    def __init__(self, repo, node, distance, partial):
        self.repo = repo
        self.node = node
        self.distance = distance
        self.partial = partial
        self._lock = None
        self.need_leave = False

    def __enter__(self):
        # We explicitly need to take a lock here, before we proceed to update
        # watchman about the update operation, so that we don't race with
        # some other actor. merge.update is going to take the wlock almost
        # immediately anyway, so this is effectively extending the lock
        # around a couple of short sanity checks.
        self._lock = self.repo.wlock()
        self.need_leave = self._state('state-enter')
        return self

    def __exit__(self, type_, value, tb):
        try:
            if self.need_leave:
                status = 'ok' if type_ is None else 'failed'
                self._state('state-leave', status=status)
        finally:
            if self._lock:
                self._lock.release()

    def _state(self, cmd, status='ok'):
        if not util.safehasattr(self.repo, '_watchmanclient'):
            return False
        try:
            commithash = self.repo[self.node].hex()
            self.repo._watchmanclient.command(cmd, {
                'name': 'hg.update',
                'metadata': {
                    # the target revision
                    'rev': commithash,
                    # approximate number of commits between current and target
                    'distance': self.distance,
                    # success/failure (only really meaningful for state-leave)
                    'status': status,
                    # whether the working copy parent is changing
                    'partial': self.partial,
                }})
            return True
        except Exception as e:
            # Swallow any errors; fire and forget
            self.repo.ui.log(
                'watchman', 'Exception %s while running %s\n', e, cmd)
            return False

# Bracket working copy updates with calls to the watchman state-enter
# and state-leave commands. This allows clients to perform more intelligent
# settling during bulk file change scenarios
# https://facebook.github.io/watchman/docs/cmd/subscribe.html#advanced-settling
def wrapupdate(orig, repo, node, branchmerge, force, ancestor=None,
               mergeancestor=False, labels=None, matcher=None, **kwargs):

    distance = 0
    partial = True
    if matcher is None or matcher.always():
        partial = False
        wc = repo[None]
        parents = wc.parents()
        if len(parents) == 2:
            anc = repo.changelog.ancestor(parents[0].node(), parents[1].node())
            ancrev = repo[anc].rev()
            distance = abs(repo[node].rev() - ancrev)
        elif len(parents) == 1:
            distance = abs(repo[node].rev() - parents[0].rev())

    with state_update(repo, node, distance, partial):
        return orig(
            repo, node, branchmerge, force, ancestor, mergeancestor,
            labels, matcher, **kwargs)

def reposetup(ui, repo):
    # We don't work with largefiles or inotify
    exts = extensions.enabled()
    for ext in _blacklist:
        if ext in exts:
            ui.warn(_('The fsmonitor extension is incompatible with the %s '
                      'extension and has been disabled.\n') % ext)
            return

-    if util.safehasattr(repo, 'dirstate'):
-        # We don't work with subrepos either.
-        # e.g. a statichttprepo, which throws on trying to access the substate.
-        # XXX This sucks.
-        try:
-            # if repo[None].substate can cause a dirstate parse, which is too
-            # slow. Instead, look for a file called hgsubstate,
-            if repo.wvfs.exists('.hgsubstate') or repo.wvfs.exists('.hgsub'):
-                return
-        except AttributeError:
-            return
+    if repo.local():
+        # We don't work with subrepos either.
+        #
+        # if repo[None].substate can cause a dirstate parse, which is too
+        # slow. Instead, look for a file called hgsubstate,
+        if repo.wvfs.exists('.hgsubstate') or repo.wvfs.exists('.hgsub'):
+            return

        fsmonitorstate = state.state(repo)
        if fsmonitorstate.mode == 'off':
            return

        try:
            client = watchmanclient.client(repo)
        except Exception as ex:
            _handleunavailable(ui, fsmonitorstate, ex)
            return

        repo._fsmonitorstate = fsmonitorstate
        repo._watchmanclient = client

        # at this point since fsmonitorstate wasn't present, repo.dirstate is
        # not a fsmonitordirstate
        dirstate = repo.dirstate
        dirstate.__class__ = makedirstate(dirstate.__class__)
        dirstate._fsmonitorinit(fsmonitorstate, client)
        # invalidate property cache, but keep filecache which contains the
        # wrapped dirstate object
        del repo.unfiltered().__dict__['dirstate']
        assert dirstate is repo._filecache['dirstate'].obj

        class fsmonitorrepo(repo.__class__):
            def status(self, *args, **kwargs):
                orig = super(fsmonitorrepo, self).status
                return overridestatus(orig, self, *args, **kwargs)

        repo.__class__ = fsmonitorrepo
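
For reference, enabling the extension on the user side comes down to a couple of hgrc stanzas. A minimal sketch, using only the options documented in the module docstring above (the values shown are the documented defaults):

::

    [extensions]
    fsmonitor =

    [fsmonitor]
    mode = on
    timeout = 2.0
    walk_on_invalidate = false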