hgext/fsmonitor/__init__.py
@@ -1,695 +1,696 @@
1 | # __init__.py - fsmonitor initialization and overrides |
|
1 | # __init__.py - fsmonitor initialization and overrides | |
2 | # |
|
2 | # | |
3 | # Copyright 2013-2016 Facebook, Inc. |
|
3 | # Copyright 2013-2016 Facebook, Inc. | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | '''Faster status operations with the Watchman file monitor (EXPERIMENTAL) |
|
8 | '''Faster status operations with the Watchman file monitor (EXPERIMENTAL) | |
9 |
|
9 | |||
10 | Integrates the file-watching program Watchman with Mercurial to produce faster |
|
10 | Integrates the file-watching program Watchman with Mercurial to produce faster | |
11 | status results. |
|
11 | status results. | |
12 |
|
12 | |||
13 | On a particular Linux system, for a real-world repository with over 400,000 |
|
13 | On a particular Linux system, for a real-world repository with over 400,000 | |
14 | files hosted on ext4, vanilla `hg status` takes 1.3 seconds. On the same |
|
14 | files hosted on ext4, vanilla `hg status` takes 1.3 seconds. On the same | |
15 | system, with fsmonitor it takes about 0.3 seconds. |
|
15 | system, with fsmonitor it takes about 0.3 seconds. | |
16 |
|
16 | |||
17 | fsmonitor requires no configuration -- it will tell Watchman about your |
|
17 | fsmonitor requires no configuration -- it will tell Watchman about your | |
18 | repository as necessary. You'll need to install Watchman from |
|
18 | repository as necessary. You'll need to install Watchman from | |
19 | https://facebook.github.io/watchman/ and make sure it is in your PATH. |
|
19 | https://facebook.github.io/watchman/ and make sure it is in your PATH. | |
20 |
|
20 | |||
21 | The following configuration options exist: |
|
21 | The following configuration options exist: | |
22 |
|
22 | |||
23 | :: |
|
23 | :: | |
24 |
|
24 | |||
25 | [fsmonitor] |
|
25 | [fsmonitor] | |
26 | mode = {off, on, paranoid} |
|
26 | mode = {off, on, paranoid} | |
27 |
|
27 | |||
28 | When `mode = off`, fsmonitor will disable itself (similar to not loading the |
|
28 | When `mode = off`, fsmonitor will disable itself (similar to not loading the | |
29 | extension at all). When `mode = on`, fsmonitor will be enabled (the default). |
|
29 | extension at all). When `mode = on`, fsmonitor will be enabled (the default). | |
30 | When `mode = paranoid`, fsmonitor will query both Watchman and the filesystem, |
|
30 | When `mode = paranoid`, fsmonitor will query both Watchman and the filesystem, | |
31 | and ensure that the results are consistent. |
|
31 | and ensure that the results are consistent. | |
32 |
|
32 | |||
33 | :: |
|
33 | :: | |
34 |
|
34 | |||
35 | [fsmonitor] |
|
35 | [fsmonitor] | |
36 | timeout = (float) |
|
36 | timeout = (float) | |
37 |
|
37 | |||
38 | A value, in seconds, that determines how long fsmonitor will wait for Watchman |
|
38 | A value, in seconds, that determines how long fsmonitor will wait for Watchman | |
39 | to return results. Defaults to `2.0`. |
|
39 | to return results. Defaults to `2.0`. | |
40 |
|
40 | |||
41 | :: |
|
41 | :: | |
42 |
|
42 | |||
43 | [fsmonitor] |
|
43 | [fsmonitor] | |
44 | blacklistusers = (list of userids) |
|
44 | blacklistusers = (list of userids) | |
45 |
|
45 | |||
46 | A list of usernames for which fsmonitor will disable itself altogether. |
|
46 | A list of usernames for which fsmonitor will disable itself altogether. | |
47 |
|
47 | |||
48 | :: |
|
48 | :: | |
49 |
|
49 | |||
50 | [fsmonitor] |
|
50 | [fsmonitor] | |
51 | walk_on_invalidate = (boolean) |
|
51 | walk_on_invalidate = (boolean) | |
52 |
|
52 | |||
53 | Whether or not to walk the whole repo ourselves when our cached state has been |
|
53 | Whether or not to walk the whole repo ourselves when our cached state has been | |
54 | invalidated, for example when Watchman has been restarted or .hgignore rules |
|
54 | invalidated, for example when Watchman has been restarted or .hgignore rules | |
55 | have been changed. Walking the repo in that case can result in competing for |
|
55 | have been changed. Walking the repo in that case can result in competing for | |
56 | I/O with Watchman. For large repos it is recommended to set this value to |
|
56 | I/O with Watchman. For large repos it is recommended to set this value to | |
57 | false. You may wish to set this to true if you have a very fast filesystem |
|
57 | false. You may wish to set this to true if you have a very fast filesystem | |
58 | that can outpace the IPC overhead of getting the result data for the full repo |
|
58 | that can outpace the IPC overhead of getting the result data for the full repo | |
59 | from Watchman. Defaults to false. |
|
59 | from Watchman. Defaults to false. | |
60 |
|
60 | |||
61 | fsmonitor is incompatible with the largefiles and eol extensions, and |
|
61 | fsmonitor is incompatible with the largefiles and eol extensions, and | |
62 | will disable itself if any of those are active. |
|
62 | will disable itself if any of those are active. | |
63 |
|
63 | |||
64 | ''' |
|
64 | ''' | |
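For reference, the options documented above all live in the `[fsmonitor]` section of an hgrc file. The snippet below is purely illustrative and is not part of this change; the values and the blacklisted user names are made-up examples:

    [fsmonitor]
    mode = on
    timeout = 2.0
    blacklistusers = buildbot nightly-ci
    walk_on_invalidate = false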
65 |
|
65 | |||
66 | # Platforms Supported |
|
66 | # Platforms Supported | |
67 | # =================== |
|
67 | # =================== | |
68 | # |
|
68 | # | |
69 | # **Linux:** *Stable*. Watchman and fsmonitor are both known to work reliably, |
|
69 | # **Linux:** *Stable*. Watchman and fsmonitor are both known to work reliably, | |
70 | # even under severe loads. |
|
70 | # even under severe loads. | |
71 | # |
|
71 | # | |
72 | # **Mac OS X:** *Stable*. The Mercurial test suite passes with fsmonitor |
|
72 | # **Mac OS X:** *Stable*. The Mercurial test suite passes with fsmonitor | |
73 | # turned on, on case-insensitive HFS+. There has been a reasonable amount of |
|
73 | # turned on, on case-insensitive HFS+. There has been a reasonable amount of | |
74 | # user testing under normal loads. |
|
74 | # user testing under normal loads. | |
75 | # |
|
75 | # | |
76 | # **Solaris, BSD:** *Alpha*. watchman and fsmonitor are believed to work, but |
|
76 | # **Solaris, BSD:** *Alpha*. watchman and fsmonitor are believed to work, but | |
77 | # very little testing has been done. |
|
77 | # very little testing has been done. | |
78 | # |
|
78 | # | |
79 | # **Windows:** *Alpha*. Not in a release version of watchman or fsmonitor yet. |
|
79 | # **Windows:** *Alpha*. Not in a release version of watchman or fsmonitor yet. | |
80 | # |
|
80 | # | |
81 | # Known Issues |
|
81 | # Known Issues | |
82 | # ============ |
|
82 | # ============ | |
83 | # |
|
83 | # | |
84 | # * fsmonitor will disable itself if any of the following extensions are |
|
84 | # * fsmonitor will disable itself if any of the following extensions are | |
85 | # enabled: largefiles, inotify, eol; or if the repository has subrepos. |
|
85 | # enabled: largefiles, inotify, eol; or if the repository has subrepos. | |
86 | # * fsmonitor will produce incorrect results if nested repos that are not |
|
86 | # * fsmonitor will produce incorrect results if nested repos that are not | |
87 | # subrepos exist. *Workaround*: add nested repo paths to your `.hgignore`. |
|
87 | # subrepos exist. *Workaround*: add nested repo paths to your `.hgignore`. | |
88 | # |
|
88 | # | |
89 | # The issues related to nested repos and subrepos are probably not fundamental |
|
89 | # The issues related to nested repos and subrepos are probably not fundamental | |
90 | # ones. Patches to fix them are welcome. |
|
90 | # ones. Patches to fix them are welcome. | |
91 |
|
91 | |||
92 | from __future__ import absolute_import |
|
92 | from __future__ import absolute_import | |
93 |
|
93 | |||
94 | import hashlib |
|
94 | import hashlib | |
95 | import os |
|
95 | import os | |
96 | import stat |
|
96 | import stat | |
97 | import sys |
|
|||
98 |
|
97 | |||
99 | from mercurial.i18n import _ |
|
98 | from mercurial.i18n import _ | |
100 | from mercurial import ( |
|
99 | from mercurial import ( | |
101 | context, |
|
100 | context, | |
|
101 | encoding, | |||
102 | extensions, |
|
102 | extensions, | |
103 | localrepo, |
|
103 | localrepo, | |
104 | merge, |
|
104 | merge, | |
105 | pathutil, |
|
105 | pathutil, | |
|
106 | pycompat, | |||
106 | scmutil, |
|
107 | scmutil, | |
107 | util, |
|
108 | util, | |
108 | ) |
|
109 | ) | |
109 | from mercurial import match as matchmod |
|
110 | from mercurial import match as matchmod | |
110 |
|
111 | |||
111 | from . import ( |
|
112 | from . import ( | |
112 | state, |
|
113 | state, | |
113 | watchmanclient, |
|
114 | watchmanclient, | |
114 | ) |
|
115 | ) | |
115 |
|
116 | |||
116 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
117 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for | |
117 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
118 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should | |
118 | # be specifying the version(s) of Mercurial they are tested with, or |
|
119 | # be specifying the version(s) of Mercurial they are tested with, or | |
119 | # leave the attribute unspecified. |
|
120 | # leave the attribute unspecified. | |
120 | testedwith = 'ships-with-hg-core' |
|
121 | testedwith = 'ships-with-hg-core' | |
121 |
|
122 | |||
122 | # This extension is incompatible with the following blacklisted extensions |
|
123 | # This extension is incompatible with the following blacklisted extensions | |
123 | # and will disable itself when encountering one of these: |
|
124 | # and will disable itself when encountering one of these: | |
124 | _blacklist = ['largefiles', 'eol'] |
|
125 | _blacklist = ['largefiles', 'eol'] | |
125 |
|
126 | |||
126 | def _handleunavailable(ui, state, ex): |
|
127 | def _handleunavailable(ui, state, ex): | |
127 | """Exception handler for Watchman interaction exceptions""" |
|
128 | """Exception handler for Watchman interaction exceptions""" | |
128 | if isinstance(ex, watchmanclient.Unavailable): |
|
129 | if isinstance(ex, watchmanclient.Unavailable): | |
129 | if ex.warn: |
|
130 | if ex.warn: | |
130 | ui.warn(str(ex) + '\n') |
|
131 | ui.warn(str(ex) + '\n') | |
131 | if ex.invalidate: |
|
132 | if ex.invalidate: | |
132 | state.invalidate() |
|
133 | state.invalidate() | |
133 | ui.log('fsmonitor', 'Watchman unavailable: %s\n', ex.msg) |
|
134 | ui.log('fsmonitor', 'Watchman unavailable: %s\n', ex.msg) | |
134 | else: |
|
135 | else: | |
135 | ui.log('fsmonitor', 'Watchman exception: %s\n', ex) |
|
136 | ui.log('fsmonitor', 'Watchman exception: %s\n', ex) | |
136 |
|
137 | |||
137 | def _hashignore(ignore): |
|
138 | def _hashignore(ignore): | |
138 | """Calculate hash for ignore patterns and filenames |
|
139 | """Calculate hash for ignore patterns and filenames | |
139 |
|
140 | |||
140 | If this information changes between Mercurial invocations, we can't |
|
141 | If this information changes between Mercurial invocations, we can't | |
141 | rely on Watchman information anymore and have to re-scan the working |
|
142 | rely on Watchman information anymore and have to re-scan the working | |
142 | copy. |
|
143 | copy. | |
143 |
|
144 | |||
144 | """ |
|
145 | """ | |
145 | sha1 = hashlib.sha1() |
|
146 | sha1 = hashlib.sha1() | |
146 | if util.safehasattr(ignore, 'includepat'): |
|
147 | if util.safehasattr(ignore, 'includepat'): | |
147 | sha1.update(ignore.includepat) |
|
148 | sha1.update(ignore.includepat) | |
148 | sha1.update('\0\0') |
|
149 | sha1.update('\0\0') | |
149 | if util.safehasattr(ignore, 'excludepat'): |
|
150 | if util.safehasattr(ignore, 'excludepat'): | |
150 | sha1.update(ignore.excludepat) |
|
151 | sha1.update(ignore.excludepat) | |
151 | sha1.update('\0\0') |
|
152 | sha1.update('\0\0') | |
152 | if util.safehasattr(ignore, 'patternspat'): |
|
153 | if util.safehasattr(ignore, 'patternspat'): | |
153 | sha1.update(ignore.patternspat) |
|
154 | sha1.update(ignore.patternspat) | |
154 | sha1.update('\0\0') |
|
155 | sha1.update('\0\0') | |
155 | if util.safehasattr(ignore, '_files'): |
|
156 | if util.safehasattr(ignore, '_files'): | |
156 | for f in ignore._files: |
|
157 | for f in ignore._files: | |
157 | sha1.update(f) |
|
158 | sha1.update(f) | |
158 | sha1.update('\0') |
|
159 | sha1.update('\0') | |
159 | return sha1.hexdigest() |
|
160 | return sha1.hexdigest() | |
160 |
|
161 | |||
161 | def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True): |
|
162 | def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True): | |
162 | '''Replacement for dirstate.walk, hooking into Watchman. |
|
163 | '''Replacement for dirstate.walk, hooking into Watchman. | |
163 |
|
164 | |||
164 | Whenever full is False, ignored is False, and the Watchman client is |
|
165 | Whenever full is False, ignored is False, and the Watchman client is | |
165 | available, use Watchman combined with saved state to possibly return only a |
|
166 | available, use Watchman combined with saved state to possibly return only a | |
166 | subset of files.''' |
|
167 | subset of files.''' | |
167 | def bail(): |
|
168 | def bail(): | |
168 | return orig(match, subrepos, unknown, ignored, full=True) |
|
169 | return orig(match, subrepos, unknown, ignored, full=True) | |
169 |
|
170 | |||
170 | if full or ignored or not self._watchmanclient.available(): |
|
171 | if full or ignored or not self._watchmanclient.available(): | |
171 | return bail() |
|
172 | return bail() | |
172 | state = self._fsmonitorstate |
|
173 | state = self._fsmonitorstate | |
173 | clock, ignorehash, notefiles = state.get() |
|
174 | clock, ignorehash, notefiles = state.get() | |
174 | if not clock: |
|
175 | if not clock: | |
175 | if state.walk_on_invalidate: |
|
176 | if state.walk_on_invalidate: | |
176 | return bail() |
|
177 | return bail() | |
177 | # Initial NULL clock value, see |
|
178 | # Initial NULL clock value, see | |
178 | # https://facebook.github.io/watchman/docs/clockspec.html |
|
179 | # https://facebook.github.io/watchman/docs/clockspec.html | |
179 | clock = 'c:0:0' |
|
180 | clock = 'c:0:0' | |
180 | notefiles = [] |
|
181 | notefiles = [] | |
181 |
|
182 | |||
182 | def fwarn(f, msg): |
|
183 | def fwarn(f, msg): | |
183 | self._ui.warn('%s: %s\n' % (self.pathto(f), msg)) |
|
184 | self._ui.warn('%s: %s\n' % (self.pathto(f), msg)) | |
184 | return False |
|
185 | return False | |
185 |
|
186 | |||
186 | def badtype(mode): |
|
187 | def badtype(mode): | |
187 | kind = _('unknown') |
|
188 | kind = _('unknown') | |
188 | if stat.S_ISCHR(mode): |
|
189 | if stat.S_ISCHR(mode): | |
189 | kind = _('character device') |
|
190 | kind = _('character device') | |
190 | elif stat.S_ISBLK(mode): |
|
191 | elif stat.S_ISBLK(mode): | |
191 | kind = _('block device') |
|
192 | kind = _('block device') | |
192 | elif stat.S_ISFIFO(mode): |
|
193 | elif stat.S_ISFIFO(mode): | |
193 | kind = _('fifo') |
|
194 | kind = _('fifo') | |
194 | elif stat.S_ISSOCK(mode): |
|
195 | elif stat.S_ISSOCK(mode): | |
195 | kind = _('socket') |
|
196 | kind = _('socket') | |
196 | elif stat.S_ISDIR(mode): |
|
197 | elif stat.S_ISDIR(mode): | |
197 | kind = _('directory') |
|
198 | kind = _('directory') | |
198 | return _('unsupported file type (type is %s)') % kind |
|
199 | return _('unsupported file type (type is %s)') % kind | |
199 |
|
200 | |||
200 | ignore = self._ignore |
|
201 | ignore = self._ignore | |
201 | dirignore = self._dirignore |
|
202 | dirignore = self._dirignore | |
202 | if unknown: |
|
203 | if unknown: | |
203 | if _hashignore(ignore) != ignorehash and clock != 'c:0:0': |
|
204 | if _hashignore(ignore) != ignorehash and clock != 'c:0:0': | |
204 | # ignore list changed -- can't rely on Watchman state any more |
|
205 | # ignore list changed -- can't rely on Watchman state any more | |
205 | if state.walk_on_invalidate: |
|
206 | if state.walk_on_invalidate: | |
206 | return bail() |
|
207 | return bail() | |
207 | notefiles = [] |
|
208 | notefiles = [] | |
208 | clock = 'c:0:0' |
|
209 | clock = 'c:0:0' | |
209 | else: |
|
210 | else: | |
210 | # always ignore |
|
211 | # always ignore | |
211 | ignore = util.always |
|
212 | ignore = util.always | |
212 | dirignore = util.always |
|
213 | dirignore = util.always | |
213 |
|
214 | |||
214 | matchfn = match.matchfn |
|
215 | matchfn = match.matchfn | |
215 | matchalways = match.always() |
|
216 | matchalways = match.always() | |
216 | dmap = self._map |
|
217 | dmap = self._map | |
217 | nonnormalset = getattr(self, '_nonnormalset', None) |
|
218 | nonnormalset = getattr(self, '_nonnormalset', None) | |
218 |
|
219 | |||
219 | copymap = self._copymap |
|
220 | copymap = self._copymap | |
220 | getkind = stat.S_IFMT |
|
221 | getkind = stat.S_IFMT | |
221 | dirkind = stat.S_IFDIR |
|
222 | dirkind = stat.S_IFDIR | |
222 | regkind = stat.S_IFREG |
|
223 | regkind = stat.S_IFREG | |
223 | lnkkind = stat.S_IFLNK |
|
224 | lnkkind = stat.S_IFLNK | |
224 | join = self._join |
|
225 | join = self._join | |
225 | normcase = util.normcase |
|
226 | normcase = util.normcase | |
226 | fresh_instance = False |
|
227 | fresh_instance = False | |
227 |
|
228 | |||
228 | exact = skipstep3 = False |
|
229 | exact = skipstep3 = False | |
229 | if matchfn == match.exact: # match.exact |
|
230 | if matchfn == match.exact: # match.exact | |
230 | exact = True |
|
231 | exact = True | |
231 | dirignore = util.always # skip step 2 |
|
232 | dirignore = util.always # skip step 2 | |
232 | elif match.files() and not match.anypats(): # match.match, no patterns |
|
233 | elif match.files() and not match.anypats(): # match.match, no patterns | |
233 | skipstep3 = True |
|
234 | skipstep3 = True | |
234 |
|
235 | |||
235 | if not exact and self._checkcase: |
|
236 | if not exact and self._checkcase: | |
236 | # note that even though we could receive directory entries, we're only |
|
237 | # note that even though we could receive directory entries, we're only | |
237 | # interested in checking if a file with the same name exists. So only |
|
238 | # interested in checking if a file with the same name exists. So only | |
238 | # normalize files if possible. |
|
239 | # normalize files if possible. | |
239 | normalize = self._normalizefile |
|
240 | normalize = self._normalizefile | |
240 | skipstep3 = False |
|
241 | skipstep3 = False | |
241 | else: |
|
242 | else: | |
242 | normalize = None |
|
243 | normalize = None | |
243 |
|
244 | |||
244 | # step 1: find all explicit files |
|
245 | # step 1: find all explicit files | |
245 | results, work, dirsnotfound = self._walkexplicit(match, subrepos) |
|
246 | results, work, dirsnotfound = self._walkexplicit(match, subrepos) | |
246 |
|
247 | |||
247 | skipstep3 = skipstep3 and not (work or dirsnotfound) |
|
248 | skipstep3 = skipstep3 and not (work or dirsnotfound) | |
248 | work = [d for d in work if not dirignore(d[0])] |
|
249 | work = [d for d in work if not dirignore(d[0])] | |
249 |
|
250 | |||
250 | if not work and (exact or skipstep3): |
|
251 | if not work and (exact or skipstep3): | |
251 | for s in subrepos: |
|
252 | for s in subrepos: | |
252 | del results[s] |
|
253 | del results[s] | |
253 | del results['.hg'] |
|
254 | del results['.hg'] | |
254 | return results |
|
255 | return results | |
255 |
|
256 | |||
256 | # step 2: query Watchman |
|
257 | # step 2: query Watchman | |
257 | try: |
|
258 | try: | |
258 | # Use the user-configured timeout for the query. |
|
259 | # Use the user-configured timeout for the query. | |
259 | # Add a little slack over the top of the user query to allow for |
|
260 | # Add a little slack over the top of the user query to allow for | |
260 | # overheads while transferring the data |
|
261 | # overheads while transferring the data | |
261 | self._watchmanclient.settimeout(state.timeout + 0.1) |
|
262 | self._watchmanclient.settimeout(state.timeout + 0.1) | |
262 | result = self._watchmanclient.command('query', { |
|
263 | result = self._watchmanclient.command('query', { | |
263 | 'fields': ['mode', 'mtime', 'size', 'exists', 'name'], |
|
264 | 'fields': ['mode', 'mtime', 'size', 'exists', 'name'], | |
264 | 'since': clock, |
|
265 | 'since': clock, | |
265 | 'expression': [ |
|
266 | 'expression': [ | |
266 | 'not', [ |
|
267 | 'not', [ | |
267 | 'anyof', ['dirname', '.hg'], |
|
268 | 'anyof', ['dirname', '.hg'], | |
268 | ['name', '.hg', 'wholename'] |
|
269 | ['name', '.hg', 'wholename'] | |
269 | ] |
|
270 | ] | |
270 | ], |
|
271 | ], | |
271 | 'sync_timeout': int(state.timeout * 1000), |
|
272 | 'sync_timeout': int(state.timeout * 1000), | |
272 | 'empty_on_fresh_instance': state.walk_on_invalidate, |
|
273 | 'empty_on_fresh_instance': state.walk_on_invalidate, | |
273 | }) |
|
274 | }) | |
274 | except Exception as ex: |
|
275 | except Exception as ex: | |
275 | _handleunavailable(self._ui, state, ex) |
|
276 | _handleunavailable(self._ui, state, ex) | |
276 | self._watchmanclient.clearconnection() |
|
277 | self._watchmanclient.clearconnection() | |
277 | return bail() |
|
278 | return bail() | |
278 | else: |
|
279 | else: | |
279 | # We need to propagate the last observed clock up so that we |
|
280 | # We need to propagate the last observed clock up so that we | |
280 | # can use it for our next query |
|
281 | # can use it for our next query | |
281 | state.setlastclock(result['clock']) |
|
282 | state.setlastclock(result['clock']) | |
282 | if result['is_fresh_instance']: |
|
283 | if result['is_fresh_instance']: | |
283 | if state.walk_on_invalidate: |
|
284 | if state.walk_on_invalidate: | |
284 | state.invalidate() |
|
285 | state.invalidate() | |
285 | return bail() |
|
286 | return bail() | |
286 | fresh_instance = True |
|
287 | fresh_instance = True | |
287 | # Ignore any prior notable files from the state info |

288 | # Ignore any prior notable files from the state info | |
288 | notefiles = [] |
|
289 | notefiles = [] | |
289 |
|
290 | |||
290 | # for file paths which require normalization and we encounter a case |
|
291 | # for file paths which require normalization and we encounter a case | |
291 | # collision, we store our own foldmap |
|
292 | # collision, we store our own foldmap | |
292 | if normalize: |
|
293 | if normalize: | |
293 | foldmap = dict((normcase(k), k) for k in results) |
|
294 | foldmap = dict((normcase(k), k) for k in results) | |
294 |
|
295 | |||
295 | switch_slashes = os.sep == '\\' |
|
296 | switch_slashes = pycompat.ossep == '\\' | |
296 | # The order of the results is, strictly speaking, undefined. |
|
297 | # The order of the results is, strictly speaking, undefined. | |
297 | # For case changes on a case insensitive filesystem we may receive |
|
298 | # For case changes on a case insensitive filesystem we may receive | |
298 | # two entries, one with exists=True and another with exists=False. |
|
299 | # two entries, one with exists=True and another with exists=False. | |
299 | # The exists=True entries in the same response should be interpreted |
|
300 | # The exists=True entries in the same response should be interpreted | |
300 | # as being happens-after the exists=False entries due to the way that |
|
301 | # as being happens-after the exists=False entries due to the way that | |
301 | # Watchman tracks files. We use this property to reconcile deletes |
|
302 | # Watchman tracks files. We use this property to reconcile deletes | |
302 | # for name case changes. |
|
303 | # for name case changes. | |
303 | for entry in result['files']: |
|
304 | for entry in result['files']: | |
304 | fname = entry['name'] |
|
305 | fname = entry['name'] | |
305 | if switch_slashes: |
|
306 | if switch_slashes: | |
306 | fname = fname.replace('\\', '/') |
|
307 | fname = fname.replace('\\', '/') | |
307 | if normalize: |
|
308 | if normalize: | |
308 | normed = normcase(fname) |
|
309 | normed = normcase(fname) | |
309 | fname = normalize(fname, True, True) |
|
310 | fname = normalize(fname, True, True) | |
310 | foldmap[normed] = fname |
|
311 | foldmap[normed] = fname | |
311 | fmode = entry['mode'] |
|
312 | fmode = entry['mode'] | |
312 | fexists = entry['exists'] |
|
313 | fexists = entry['exists'] | |
313 | kind = getkind(fmode) |
|
314 | kind = getkind(fmode) | |
314 |
|
315 | |||
315 | if not fexists: |
|
316 | if not fexists: | |
316 | # if marked as deleted and we don't already have a change |
|
317 | # if marked as deleted and we don't already have a change | |
317 | # record, mark it as deleted. If we already have an entry |
|
318 | # record, mark it as deleted. If we already have an entry | |
318 | # for fname then it was either part of walkexplicit or was |
|
319 | # for fname then it was either part of walkexplicit or was | |
319 | # an earlier result that was a case change |
|
320 | # an earlier result that was a case change | |
320 | if fname not in results and fname in dmap and ( |
|
321 | if fname not in results and fname in dmap and ( | |
321 | matchalways or matchfn(fname)): |
|
322 | matchalways or matchfn(fname)): | |
322 | results[fname] = None |
|
323 | results[fname] = None | |
323 | elif kind == dirkind: |
|
324 | elif kind == dirkind: | |
324 | if fname in dmap and (matchalways or matchfn(fname)): |
|
325 | if fname in dmap and (matchalways or matchfn(fname)): | |
325 | results[fname] = None |
|
326 | results[fname] = None | |
326 | elif kind == regkind or kind == lnkkind: |
|
327 | elif kind == regkind or kind == lnkkind: | |
327 | if fname in dmap: |
|
328 | if fname in dmap: | |
328 | if matchalways or matchfn(fname): |
|
329 | if matchalways or matchfn(fname): | |
329 | results[fname] = entry |
|
330 | results[fname] = entry | |
330 | elif (matchalways or matchfn(fname)) and not ignore(fname): |
|
331 | elif (matchalways or matchfn(fname)) and not ignore(fname): | |
331 | results[fname] = entry |
|
332 | results[fname] = entry | |
332 | elif fname in dmap and (matchalways or matchfn(fname)): |
|
333 | elif fname in dmap and (matchalways or matchfn(fname)): | |
333 | results[fname] = None |
|
334 | results[fname] = None | |
334 |
|
335 | |||
335 | # step 3: query notable files we don't already know about |
|
336 | # step 3: query notable files we don't already know about | |
336 | # XXX try not to iterate over the entire dmap |
|
337 | # XXX try not to iterate over the entire dmap | |
337 | if normalize: |
|
338 | if normalize: | |
338 | # any notable files that have changed case will already be handled |
|
339 | # any notable files that have changed case will already be handled | |
339 | # above, so just check membership in the foldmap |
|
340 | # above, so just check membership in the foldmap | |
340 | notefiles = set((normalize(f, True, True) for f in notefiles |
|
341 | notefiles = set((normalize(f, True, True) for f in notefiles | |
341 | if normcase(f) not in foldmap)) |
|
342 | if normcase(f) not in foldmap)) | |
342 | visit = set((f for f in notefiles if (f not in results and matchfn(f) |
|
343 | visit = set((f for f in notefiles if (f not in results and matchfn(f) | |
343 | and (f in dmap or not ignore(f))))) |
|
344 | and (f in dmap or not ignore(f))))) | |
344 |
|
345 | |||
345 | if nonnormalset is not None and not fresh_instance: |
|
346 | if nonnormalset is not None and not fresh_instance: | |
346 | if matchalways: |
|
347 | if matchalways: | |
347 | visit.update(f for f in nonnormalset if f not in results) |
|
348 | visit.update(f for f in nonnormalset if f not in results) | |
348 | visit.update(f for f in copymap if f not in results) |
|
349 | visit.update(f for f in copymap if f not in results) | |
349 | else: |
|
350 | else: | |
350 | visit.update(f for f in nonnormalset |
|
351 | visit.update(f for f in nonnormalset | |
351 | if f not in results and matchfn(f)) |
|
352 | if f not in results and matchfn(f)) | |
352 | visit.update(f for f in copymap |
|
353 | visit.update(f for f in copymap | |
353 | if f not in results and matchfn(f)) |
|
354 | if f not in results and matchfn(f)) | |
354 | else: |
|
355 | else: | |
355 | if matchalways: |
|
356 | if matchalways: | |
356 | visit.update(f for f, st in dmap.iteritems() |
|
357 | visit.update(f for f, st in dmap.iteritems() | |
357 | if (f not in results and |
|
358 | if (f not in results and | |
358 | (st[2] < 0 or st[0] != 'n' or fresh_instance))) |
|
359 | (st[2] < 0 or st[0] != 'n' or fresh_instance))) | |
359 | visit.update(f for f in copymap if f not in results) |
|
360 | visit.update(f for f in copymap if f not in results) | |
360 | else: |
|
361 | else: | |
361 | visit.update(f for f, st in dmap.iteritems() |
|
362 | visit.update(f for f, st in dmap.iteritems() | |
362 | if (f not in results and |
|
363 | if (f not in results and | |
363 | (st[2] < 0 or st[0] != 'n' or fresh_instance) |
|
364 | (st[2] < 0 or st[0] != 'n' or fresh_instance) | |
364 | and matchfn(f))) |
|
365 | and matchfn(f))) | |
365 | visit.update(f for f in copymap |
|
366 | visit.update(f for f in copymap | |
366 | if f not in results and matchfn(f)) |
|
367 | if f not in results and matchfn(f)) | |
367 |
|
368 | |||
368 | audit = pathutil.pathauditor(self._root).check |
|
369 | audit = pathutil.pathauditor(self._root).check | |
369 | auditpass = [f for f in visit if audit(f)] |
|
370 | auditpass = [f for f in visit if audit(f)] | |
370 | auditpass.sort() |
|
371 | auditpass.sort() | |
371 | auditfail = visit.difference(auditpass) |
|
372 | auditfail = visit.difference(auditpass) | |
372 | for f in auditfail: |
|
373 | for f in auditfail: | |
373 | results[f] = None |
|
374 | results[f] = None | |
374 |
|
375 | |||
375 | nf = iter(auditpass).next |
|
376 | nf = iter(auditpass).next | |
376 | for st in util.statfiles([join(f) for f in auditpass]): |
|
377 | for st in util.statfiles([join(f) for f in auditpass]): | |
377 | f = nf() |
|
378 | f = nf() | |
378 | if st or f in dmap: |
|
379 | if st or f in dmap: | |
379 | results[f] = st |
|
380 | results[f] = st | |
380 |
|
381 | |||
381 | for s in subrepos: |
|
382 | for s in subrepos: | |
382 | del results[s] |
|
383 | del results[s] | |
383 | del results['.hg'] |
|
384 | del results['.hg'] | |
384 | return results |
|
385 | return results | |
385 |
|
386 | |||
386 | def overridestatus( |
|
387 | def overridestatus( | |
387 | orig, self, node1='.', node2=None, match=None, ignored=False, |
|
388 | orig, self, node1='.', node2=None, match=None, ignored=False, | |
388 | clean=False, unknown=False, listsubrepos=False): |
|
389 | clean=False, unknown=False, listsubrepos=False): | |
389 | listignored = ignored |
|
390 | listignored = ignored | |
390 | listclean = clean |
|
391 | listclean = clean | |
391 | listunknown = unknown |
|
392 | listunknown = unknown | |
392 |
|
393 | |||
393 | def _cmpsets(l1, l2): |
|
394 | def _cmpsets(l1, l2): | |
394 | try: |
|
395 | try: | |
395 | if 'FSMONITOR_LOG_FILE' in os.environ: |
|
396 | if 'FSMONITOR_LOG_FILE' in encoding.environ: | |
396 | fn = os.environ['FSMONITOR_LOG_FILE'] |
|
397 | fn = encoding.environ['FSMONITOR_LOG_FILE'] | |
397 | f = open(fn, 'wb') |
|
398 | f = open(fn, 'wb') | |
398 | else: |
|
399 | else: | |
399 | fn = 'fsmonitorfail.log' |
|
400 | fn = 'fsmonitorfail.log' | |
400 | f = self.opener(fn, 'wb') |
|
401 | f = self.opener(fn, 'wb') | |
401 | except (IOError, OSError): |
|
402 | except (IOError, OSError): | |
402 | self.ui.warn(_('warning: unable to write to %s\n') % fn) |
|
403 | self.ui.warn(_('warning: unable to write to %s\n') % fn) | |
403 | return |
|
404 | return | |
404 |
|
405 | |||
405 | try: |
|
406 | try: | |
406 | for i, (s1, s2) in enumerate(zip(l1, l2)): |
|
407 | for i, (s1, s2) in enumerate(zip(l1, l2)): | |
407 | if set(s1) != set(s2): |
|
408 | if set(s1) != set(s2): | |
408 | f.write('sets at position %d are unequal\n' % i) |
|
409 | f.write('sets at position %d are unequal\n' % i) | |
409 | f.write('watchman returned: %s\n' % s1) |
|
410 | f.write('watchman returned: %s\n' % s1) | |
410 | f.write('stat returned: %s\n' % s2) |
|
411 | f.write('stat returned: %s\n' % s2) | |
411 | finally: |
|
412 | finally: | |
412 | f.close() |
|
413 | f.close() | |
413 |
|
414 | |||
414 | if isinstance(node1, context.changectx): |
|
415 | if isinstance(node1, context.changectx): | |
415 | ctx1 = node1 |
|
416 | ctx1 = node1 | |
416 | else: |
|
417 | else: | |
417 | ctx1 = self[node1] |
|
418 | ctx1 = self[node1] | |
418 | if isinstance(node2, context.changectx): |
|
419 | if isinstance(node2, context.changectx): | |
419 | ctx2 = node2 |
|
420 | ctx2 = node2 | |
420 | else: |
|
421 | else: | |
421 | ctx2 = self[node2] |
|
422 | ctx2 = self[node2] | |
422 |
|
423 | |||
423 | working = ctx2.rev() is None |
|
424 | working = ctx2.rev() is None | |
424 | parentworking = working and ctx1 == self['.'] |
|
425 | parentworking = working and ctx1 == self['.'] | |
425 | match = match or matchmod.always(self.root, self.getcwd()) |
|
426 | match = match or matchmod.always(self.root, self.getcwd()) | |
426 |
|
427 | |||
427 | # Maybe we can use this opportunity to update Watchman's state. |
|
428 | # Maybe we can use this opportunity to update Watchman's state. | |
428 | # Mercurial uses workingcommitctx and/or memctx to represent the part of |
|
429 | # Mercurial uses workingcommitctx and/or memctx to represent the part of | |
429 | # the workingctx that is to be committed. So don't update the state in |
|
430 | # the workingctx that is to be committed. So don't update the state in | |
430 | # that case. |
|
431 | # that case. | |
431 | # HG_PENDING is set in the environment when the dirstate is being updated |
|
432 | # HG_PENDING is set in the environment when the dirstate is being updated | |
432 | # in the middle of a transaction; we must not update our state in that |
|
433 | # in the middle of a transaction; we must not update our state in that | |
433 | # case, or we risk forgetting about changes in the working copy. |
|
434 | # case, or we risk forgetting about changes in the working copy. | |
434 | updatestate = (parentworking and match.always() and |
|
435 | updatestate = (parentworking and match.always() and | |
435 | not isinstance(ctx2, (context.workingcommitctx, |
|
436 | not isinstance(ctx2, (context.workingcommitctx, | |
436 | context.memctx)) and |
|
437 | context.memctx)) and | |
437 | 'HG_PENDING' not in os.environ) |
|
438 | 'HG_PENDING' not in encoding.environ) | |
438 |
|
439 | |||
439 | try: |
|
440 | try: | |
440 | if self._fsmonitorstate.walk_on_invalidate: |
|
441 | if self._fsmonitorstate.walk_on_invalidate: | |
441 | # Use a short timeout to query the current clock. If that |
|
442 | # Use a short timeout to query the current clock. If that | |
442 | # takes too long then we assume that the service will be slow |
|
443 | # takes too long then we assume that the service will be slow | |
443 | # to answer our query. |
|
444 | # to answer our query. | |
444 | # walk_on_invalidate indicates that we prefer to walk the |
|
445 | # walk_on_invalidate indicates that we prefer to walk the | |
445 | # tree ourselves because we can ignore portions that Watchman |
|
446 | # tree ourselves because we can ignore portions that Watchman | |
446 | # cannot and we tend to be faster in the warmer buffer cache |
|
447 | # cannot and we tend to be faster in the warmer buffer cache | |
447 | # cases. |
|
448 | # cases. | |
448 | self._watchmanclient.settimeout(0.1) |
|
449 | self._watchmanclient.settimeout(0.1) | |
449 | else: |
|
450 | else: | |
450 | # Give Watchman more time to potentially complete its walk |
|
451 | # Give Watchman more time to potentially complete its walk | |
451 | # and return the initial clock. In this mode we assume that |
|
452 | # and return the initial clock. In this mode we assume that | |
452 | # the filesystem will be slower than parsing a potentially |
|
453 | # the filesystem will be slower than parsing a potentially | |
453 | # very large Watchman result set. |
|
454 | # very large Watchman result set. | |
454 | self._watchmanclient.settimeout( |
|
455 | self._watchmanclient.settimeout( | |
455 | self._fsmonitorstate.timeout + 0.1) |
|
456 | self._fsmonitorstate.timeout + 0.1) | |
456 | startclock = self._watchmanclient.getcurrentclock() |
|
457 | startclock = self._watchmanclient.getcurrentclock() | |
457 | except Exception as ex: |
|
458 | except Exception as ex: | |
458 | self._watchmanclient.clearconnection() |
|
459 | self._watchmanclient.clearconnection() | |
459 | _handleunavailable(self.ui, self._fsmonitorstate, ex) |
|
460 | _handleunavailable(self.ui, self._fsmonitorstate, ex) | |
460 | # boo, Watchman failed. bail |
|
461 | # boo, Watchman failed. bail | |
461 | return orig(node1, node2, match, listignored, listclean, |
|
462 | return orig(node1, node2, match, listignored, listclean, | |
462 | listunknown, listsubrepos) |
|
463 | listunknown, listsubrepos) | |
463 |
|
464 | |||
464 | if updatestate: |
|
465 | if updatestate: | |
465 | # We need info about unknown files. This may make things slower the |
|
466 | # We need info about unknown files. This may make things slower the | |
466 | # first time, but whatever. |
|
467 | # first time, but whatever. | |
467 | stateunknown = True |
|
468 | stateunknown = True | |
468 | else: |
|
469 | else: | |
469 | stateunknown = listunknown |
|
470 | stateunknown = listunknown | |
470 |
|
471 | |||
471 | r = orig(node1, node2, match, listignored, listclean, stateunknown, |
|
472 | r = orig(node1, node2, match, listignored, listclean, stateunknown, | |
472 | listsubrepos) |
|
473 | listsubrepos) | |
473 | modified, added, removed, deleted, unknown, ignored, clean = r |
|
474 | modified, added, removed, deleted, unknown, ignored, clean = r | |
474 |
|
475 | |||
475 | if updatestate: |
|
476 | if updatestate: | |
476 | notefiles = modified + added + removed + deleted + unknown |
|
477 | notefiles = modified + added + removed + deleted + unknown | |
477 | self._fsmonitorstate.set( |
|
478 | self._fsmonitorstate.set( | |
478 | self._fsmonitorstate.getlastclock() or startclock, |
|
479 | self._fsmonitorstate.getlastclock() or startclock, | |
479 | _hashignore(self.dirstate._ignore), |
|
480 | _hashignore(self.dirstate._ignore), | |
480 | notefiles) |
|
481 | notefiles) | |
481 |
|
482 | |||
482 | if not listunknown: |
|
483 | if not listunknown: | |
483 | unknown = [] |
|
484 | unknown = [] | |
484 |
|
485 | |||
485 | # don't do paranoid checks if we're not going to query Watchman anyway |
|
486 | # don't do paranoid checks if we're not going to query Watchman anyway | |
486 | full = listclean or match.traversedir is not None |
|
487 | full = listclean or match.traversedir is not None | |
487 | if self._fsmonitorstate.mode == 'paranoid' and not full: |
|
488 | if self._fsmonitorstate.mode == 'paranoid' and not full: | |
488 | # run status again and fall back to the old walk this time |
|
489 | # run status again and fall back to the old walk this time | |
489 | self.dirstate._fsmonitordisable = True |
|
490 | self.dirstate._fsmonitordisable = True | |
490 |
|
491 | |||
491 | # shut the UI up |
|
492 | # shut the UI up | |
492 | quiet = self.ui.quiet |
|
493 | quiet = self.ui.quiet | |
493 | self.ui.quiet = True |
|
494 | self.ui.quiet = True | |
494 | fout, ferr = self.ui.fout, self.ui.ferr |
|
495 | fout, ferr = self.ui.fout, self.ui.ferr | |
495 | self.ui.fout = self.ui.ferr = open(os.devnull, 'wb') |
|
496 | self.ui.fout = self.ui.ferr = open(os.devnull, 'wb') | |
496 |
|
497 | |||
497 | try: |
|
498 | try: | |
498 | rv2 = orig( |
|
499 | rv2 = orig( | |
499 | node1, node2, match, listignored, listclean, listunknown, |
|
500 | node1, node2, match, listignored, listclean, listunknown, | |
500 | listsubrepos) |
|
501 | listsubrepos) | |
501 | finally: |
|
502 | finally: | |
502 | self.dirstate._fsmonitordisable = False |
|
503 | self.dirstate._fsmonitordisable = False | |
503 | self.ui.quiet = quiet |
|
504 | self.ui.quiet = quiet | |
504 | self.ui.fout, self.ui.ferr = fout, ferr |
|
505 | self.ui.fout, self.ui.ferr = fout, ferr | |
505 |
|
506 | |||
506 | # clean isn't tested since it's set to True above |
|
507 | # clean isn't tested since it's set to True above | |
507 | _cmpsets([modified, added, removed, deleted, unknown, ignored, clean], |
|
508 | _cmpsets([modified, added, removed, deleted, unknown, ignored, clean], | |
508 | rv2) |
|
509 | rv2) | |
509 | modified, added, removed, deleted, unknown, ignored, clean = rv2 |
|
510 | modified, added, removed, deleted, unknown, ignored, clean = rv2 | |
510 |
|
511 | |||
511 | return scmutil.status( |
|
512 | return scmutil.status( | |
512 | modified, added, removed, deleted, unknown, ignored, clean) |
|
513 | modified, added, removed, deleted, unknown, ignored, clean) | |
513 |
|
514 | |||
514 | def makedirstate(cls): |
|
515 | def makedirstate(cls): | |
515 | class fsmonitordirstate(cls): |
|
516 | class fsmonitordirstate(cls): | |
516 | def _fsmonitorinit(self, fsmonitorstate, watchmanclient): |
|
517 | def _fsmonitorinit(self, fsmonitorstate, watchmanclient): | |
517 | # _fsmonitordisable is used in paranoid mode |
|
518 | # _fsmonitordisable is used in paranoid mode | |
518 | self._fsmonitordisable = False |
|
519 | self._fsmonitordisable = False | |
519 | self._fsmonitorstate = fsmonitorstate |
|
520 | self._fsmonitorstate = fsmonitorstate | |
520 | self._watchmanclient = watchmanclient |
|
521 | self._watchmanclient = watchmanclient | |
521 |
|
522 | |||
522 | def walk(self, *args, **kwargs): |
|
523 | def walk(self, *args, **kwargs): | |
523 | orig = super(fsmonitordirstate, self).walk |
|
524 | orig = super(fsmonitordirstate, self).walk | |
524 | if self._fsmonitordisable: |
|
525 | if self._fsmonitordisable: | |
525 | return orig(*args, **kwargs) |
|
526 | return orig(*args, **kwargs) | |
526 | return overridewalk(orig, self, *args, **kwargs) |
|
527 | return overridewalk(orig, self, *args, **kwargs) | |
527 |
|
528 | |||
528 | def rebuild(self, *args, **kwargs): |
|
529 | def rebuild(self, *args, **kwargs): | |
529 | self._fsmonitorstate.invalidate() |
|
530 | self._fsmonitorstate.invalidate() | |
530 | return super(fsmonitordirstate, self).rebuild(*args, **kwargs) |
|
531 | return super(fsmonitordirstate, self).rebuild(*args, **kwargs) | |
531 |
|
532 | |||
532 | def invalidate(self, *args, **kwargs): |
|
533 | def invalidate(self, *args, **kwargs): | |
533 | self._fsmonitorstate.invalidate() |
|
534 | self._fsmonitorstate.invalidate() | |
534 | return super(fsmonitordirstate, self).invalidate(*args, **kwargs) |
|
535 | return super(fsmonitordirstate, self).invalidate(*args, **kwargs) | |
535 |
|
536 | |||
536 | return fsmonitordirstate |
|
537 | return fsmonitordirstate | |
537 |
|
538 | |||
538 | def wrapdirstate(orig, self): |
|
539 | def wrapdirstate(orig, self): | |
539 | ds = orig(self) |
|
540 | ds = orig(self) | |
540 | # only override the dirstate when Watchman is available for the repo |
|
541 | # only override the dirstate when Watchman is available for the repo | |
541 | if util.safehasattr(self, '_fsmonitorstate'): |
|
542 | if util.safehasattr(self, '_fsmonitorstate'): | |
542 | ds.__class__ = makedirstate(ds.__class__) |
|
543 | ds.__class__ = makedirstate(ds.__class__) | |
543 | ds._fsmonitorinit(self._fsmonitorstate, self._watchmanclient) |
|
544 | ds._fsmonitorinit(self._fsmonitorstate, self._watchmanclient) | |
544 | return ds |
|
545 | return ds | |
545 |
|
546 | |||
546 | def extsetup(ui): |
|
547 | def extsetup(ui): | |
547 | wrapfilecache(localrepo.localrepository, 'dirstate', wrapdirstate) |
|
548 | wrapfilecache(localrepo.localrepository, 'dirstate', wrapdirstate) | |
548 | if sys.platform == 'darwin': |
|
549 | if pycompat.sysplatform == 'darwin': | |
549 | # An assist for avoiding the dangling-symlink fsevents bug |
|
550 | # An assist for avoiding the dangling-symlink fsevents bug | |
550 | extensions.wrapfunction(os, 'symlink', wrapsymlink) |
|
551 | extensions.wrapfunction(os, 'symlink', wrapsymlink) | |
551 |
|
552 | |||
552 | extensions.wrapfunction(merge, 'update', wrapupdate) |
|
553 | extensions.wrapfunction(merge, 'update', wrapupdate) | |
553 |
|
554 | |||
554 | def wrapsymlink(orig, source, link_name): |
|
555 | def wrapsymlink(orig, source, link_name): | |
555 | ''' if we create a dangling symlink, also touch the parent dir |
|
556 | ''' if we create a dangling symlink, also touch the parent dir | |
556 | to encourage fsevents notifications to work more correctly ''' |
|
557 | to encourage fsevents notifications to work more correctly ''' | |
557 | try: |
|
558 | try: | |
558 | return orig(source, link_name) |
|
559 | return orig(source, link_name) | |
559 | finally: |
|
560 | finally: | |
560 | try: |
|
561 | try: | |
561 | os.utime(os.path.dirname(link_name), None) |
|
562 | os.utime(os.path.dirname(link_name), None) | |
562 | except OSError: |
|
563 | except OSError: | |
563 | pass |
|
564 | pass | |
564 |
|
565 | |||
565 | class state_update(object): |
|
566 | class state_update(object): | |
566 | ''' This context manager is responsible for dispatching the state-enter |
|
567 | ''' This context manager is responsible for dispatching the state-enter | |
567 | and state-leave signals to the watchman service ''' |
|
568 | and state-leave signals to the watchman service ''' | |
568 |
|
569 | |||
569 | def __init__(self, repo, node, distance, partial): |
|
570 | def __init__(self, repo, node, distance, partial): | |
570 | self.repo = repo |
|
571 | self.repo = repo | |
571 | self.node = node |
|
572 | self.node = node | |
572 | self.distance = distance |
|
573 | self.distance = distance | |
573 | self.partial = partial |
|
574 | self.partial = partial | |
574 |
|
575 | |||
575 | def __enter__(self): |
|
576 | def __enter__(self): | |
576 | self._state('state-enter') |
|
577 | self._state('state-enter') | |
577 | return self |
|
578 | return self | |
578 |
|
579 | |||
579 | def __exit__(self, type_, value, tb): |
|
580 | def __exit__(self, type_, value, tb): | |
580 | status = 'ok' if type_ is None else 'failed' |
|
581 | status = 'ok' if type_ is None else 'failed' | |
581 | self._state('state-leave', status=status) |
|
582 | self._state('state-leave', status=status) | |
582 |
|
583 | |||
583 | def _state(self, cmd, status='ok'): |
|
584 | def _state(self, cmd, status='ok'): | |
584 | if not util.safehasattr(self.repo, '_watchmanclient'): |
|
585 | if not util.safehasattr(self.repo, '_watchmanclient'): | |
585 | return |
|
586 | return | |
586 | try: |
|
587 | try: | |
587 | commithash = self.repo[self.node].hex() |
|
588 | commithash = self.repo[self.node].hex() | |
588 | self.repo._watchmanclient.command(cmd, { |
|
589 | self.repo._watchmanclient.command(cmd, { | |
589 | 'name': 'hg.update', |
|
590 | 'name': 'hg.update', | |
590 | 'metadata': { |
|
591 | 'metadata': { | |
591 | # the target revision |
|
592 | # the target revision | |
592 | 'rev': commithash, |
|
593 | 'rev': commithash, | |
593 | # approximate number of commits between current and target |
|
594 | # approximate number of commits between current and target | |
594 | 'distance': self.distance, |
|
595 | 'distance': self.distance, | |
595 | # success/failure (only really meaningful for state-leave) |
|
596 | # success/failure (only really meaningful for state-leave) | |
596 | 'status': status, |
|
597 | 'status': status, | |
597 | # whether the working copy parent is changing |
|
598 | # whether the working copy parent is changing | |
598 | 'partial': self.partial, |
|
599 | 'partial': self.partial, | |
599 | }}) |
|
600 | }}) | |
600 | except Exception as e: |
|
601 | except Exception as e: | |
601 | # Swallow any errors; fire and forget |
|
602 | # Swallow any errors; fire and forget | |
602 | self.repo.ui.log( |
|
603 | self.repo.ui.log( | |
603 | 'watchman', 'Exception %s while running %s\n', e, cmd) |
|
604 | 'watchman', 'Exception %s while running %s\n', e, cmd) | |
604 |
|
605 | |||
605 | # Bracket working copy updates with calls to the watchman state-enter |
|
606 | # Bracket working copy updates with calls to the watchman state-enter | |
606 | # and state-leave commands. This allows clients to perform more intelligent |
|
607 | # and state-leave commands. This allows clients to perform more intelligent | |
607 | # settling during bulk file change scenarios |
|
608 | # settling during bulk file change scenarios | |
608 | # https://facebook.github.io/watchman/docs/cmd/subscribe.html#advanced-settling |
|
609 | # https://facebook.github.io/watchman/docs/cmd/subscribe.html#advanced-settling | |
609 | def wrapupdate(orig, repo, node, branchmerge, force, ancestor=None, |
|
610 | def wrapupdate(orig, repo, node, branchmerge, force, ancestor=None, | |
610 | mergeancestor=False, labels=None, matcher=None, **kwargs): |
|
611 | mergeancestor=False, labels=None, matcher=None, **kwargs): | |
611 |
|
612 | |||
612 | distance = 0 |
|
613 | distance = 0 | |
613 | partial = True |
|
614 | partial = True | |
614 | if matcher is None or matcher.always(): |
|
615 | if matcher is None or matcher.always(): | |
615 | partial = False |
|
616 | partial = False | |
616 | wc = repo[None] |
|
617 | wc = repo[None] | |
617 | parents = wc.parents() |
|
618 | parents = wc.parents() | |
618 | if len(parents) == 2: |
|
619 | if len(parents) == 2: | |
619 | anc = repo.changelog.ancestor(parents[0].node(), parents[1].node()) |
|
620 | anc = repo.changelog.ancestor(parents[0].node(), parents[1].node()) | |
620 | ancrev = repo[anc].rev() |
|
621 | ancrev = repo[anc].rev() | |
621 | distance = abs(repo[node].rev() - ancrev) |
|
622 | distance = abs(repo[node].rev() - ancrev) | |
622 | elif len(parents) == 1: |
|
623 | elif len(parents) == 1: | |
623 | distance = abs(repo[node].rev() - parents[0].rev()) |
|
624 | distance = abs(repo[node].rev() - parents[0].rev()) | |
624 |
|
625 | |||
625 | with state_update(repo, node, distance, partial): |
|
626 | with state_update(repo, node, distance, partial): | |
626 | return orig( |
|
627 | return orig( | |
627 | repo, node, branchmerge, force, ancestor, mergeancestor, |
|
628 | repo, node, branchmerge, force, ancestor, mergeancestor, | |
628 | labels, matcher, **kwargs) |

629 | labels, matcher, **kwargs) | |
629 |
|
630 | |||
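The state_update bracket above publishes a state named 'hg.update' to Watchman so that subscribers can settle across working-copy updates. As an illustration only (the repository path and subscription name are placeholders, and this assumes the pywatchman bindings and a local Watchman service are available), a subscriber could defer notifications during that state roughly like this:

    # Hypothetical subscriber that settles across `hg update` by deferring
    # notifications while the 'hg.update' state is asserted.
    import pywatchman

    client = pywatchman.client()  # connect to the local watchman service
    watch = client.query('watch-project', '/path/to/repo')['watch']

    client.query('subscribe', watch, 'settle-demo', {
        'expression': ['type', 'f'],   # report regular files only
        'fields': ['name', 'exists'],
        # buffer notifications while 'hg.update' is asserted; they are
        # flushed after the matching state-leave arrives
        'defer': ['hg.update'],
    })

Notification PDUs would then be read off the same connection (for example with client.receive()), arriving only once the update has settled.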
630 | def reposetup(ui, repo): |
|
631 | def reposetup(ui, repo): | |
631 | # We don't work with largefiles or inotify |
|
632 | # We don't work with largefiles or inotify | |
632 | exts = extensions.enabled() |
|
633 | exts = extensions.enabled() | |
633 | for ext in _blacklist: |
|
634 | for ext in _blacklist: | |
634 | if ext in exts: |
|
635 | if ext in exts: | |
635 | ui.warn(_('The fsmonitor extension is incompatible with the %s ' |
|
636 | ui.warn(_('The fsmonitor extension is incompatible with the %s ' | |
636 | 'extension and has been disabled.\n') % ext) |
|
637 | 'extension and has been disabled.\n') % ext) | |
637 | return |
|
638 | return | |
638 |
|
639 | |||
639 | if util.safehasattr(repo, 'dirstate'): |
|
640 | if util.safehasattr(repo, 'dirstate'): | |
640 | # We don't work with subrepos either. Note that we can get passed in |
|
641 | # We don't work with subrepos either. Note that we can get passed in | |
641 | # e.g. a statichttprepo, which throws on trying to access the substate. |
|
642 | # e.g. a statichttprepo, which throws on trying to access the substate. | |
642 | # XXX This sucks. |
|
643 | # XXX This sucks. | |
643 | try: |
|
644 | try: | |
644 | # repo[None].substate can cause a dirstate parse, which is too |

645 | # repo[None].substate can cause a dirstate parse, which is too | |
645 | # slow. Instead, look for a file called .hgsubstate. |

646 | # slow. Instead, look for a file called .hgsubstate. | |
646 | if repo.wvfs.exists('.hgsubstate') or repo.wvfs.exists('.hgsub'): |
|
647 | if repo.wvfs.exists('.hgsubstate') or repo.wvfs.exists('.hgsub'): | |
647 | return |
|
648 | return | |
648 | except AttributeError: |
|
649 | except AttributeError: | |
649 | return |
|
650 | return | |
650 |
|
651 | |||
651 | fsmonitorstate = state.state(repo) |
|
652 | fsmonitorstate = state.state(repo) | |
652 | if fsmonitorstate.mode == 'off': |
|
653 | if fsmonitorstate.mode == 'off': | |
653 | return |
|
654 | return | |
654 |
|
655 | |||
655 | try: |
|
656 | try: | |
656 | client = watchmanclient.client(repo) |
|
657 | client = watchmanclient.client(repo) | |
657 | except Exception as ex: |
|
658 | except Exception as ex: | |
658 | _handleunavailable(ui, fsmonitorstate, ex) |
|
659 | _handleunavailable(ui, fsmonitorstate, ex) | |
659 | return |
|
660 | return | |
660 |
|
661 | |||
661 | repo._fsmonitorstate = fsmonitorstate |
|
662 | repo._fsmonitorstate = fsmonitorstate | |
662 | repo._watchmanclient = client |
|
663 | repo._watchmanclient = client | |
663 |
|
664 | |||
664 | # at this point since fsmonitorstate wasn't present, repo.dirstate is |
|
665 | # at this point since fsmonitorstate wasn't present, repo.dirstate is | |
665 | # not a fsmonitordirstate |
|
666 | # not a fsmonitordirstate | |
666 | repo.dirstate.__class__ = makedirstate(repo.dirstate.__class__) |
|
667 | repo.dirstate.__class__ = makedirstate(repo.dirstate.__class__) | |
667 | # nuke the dirstate so that _fsmonitorinit and subsequent configuration |
|
668 | # nuke the dirstate so that _fsmonitorinit and subsequent configuration | |
668 | # changes take effect on it |
|
669 | # changes take effect on it | |
669 | del repo._filecache['dirstate'] |
|
670 | del repo._filecache['dirstate'] | |
670 | delattr(repo.unfiltered(), 'dirstate') |
|
671 | delattr(repo.unfiltered(), 'dirstate') | |
671 |
|
672 | |||
672 | class fsmonitorrepo(repo.__class__): |
|
673 | class fsmonitorrepo(repo.__class__): | |
673 | def status(self, *args, **kwargs): |
|
674 | def status(self, *args, **kwargs): | |
674 | orig = super(fsmonitorrepo, self).status |
|
675 | orig = super(fsmonitorrepo, self).status | |
675 | return overridestatus(orig, self, *args, **kwargs) |
|
676 | return overridestatus(orig, self, *args, **kwargs) | |
676 |
|
677 | |||
677 | repo.__class__ = fsmonitorrepo |
|
678 | repo.__class__ = fsmonitorrepo | |
678 |
|
679 | |||
679 | def wrapfilecache(cls, propname, wrapper): |
|
680 | def wrapfilecache(cls, propname, wrapper): | |
680 | """Wraps a filecache property. These can't be wrapped using the normal |
|
681 | """Wraps a filecache property. These can't be wrapped using the normal | |
681 | wrapfunction. This should eventually go into upstream Mercurial. |
|
682 | wrapfunction. This should eventually go into upstream Mercurial. | |
682 | """ |
|
683 | """ | |
683 | assert callable(wrapper) |
|
684 | assert callable(wrapper) | |
684 | for currcls in cls.__mro__: |
|
685 | for currcls in cls.__mro__: | |
685 | if propname in currcls.__dict__: |
|
686 | if propname in currcls.__dict__: | |
686 | origfn = currcls.__dict__[propname].func |
|
687 | origfn = currcls.__dict__[propname].func | |
687 | assert callable(origfn) |
|
688 | assert callable(origfn) | |
688 | def wrap(*args, **kwargs): |
|
689 | def wrap(*args, **kwargs): | |
689 | return wrapper(origfn, *args, **kwargs) |
|
690 | return wrapper(origfn, *args, **kwargs) | |
690 | currcls.__dict__[propname].func = wrap |
|
691 | currcls.__dict__[propname].func = wrap | |
691 | break |
|
692 | break | |
692 |
|
693 | |||
693 | if currcls is object: |
|
694 | if currcls is object: | |
694 | raise AttributeError( |
|
695 | raise AttributeError( | |
695 | _("type '%s' has no property '%s'") % (cls, propname)) |
|
696 | _("type '%s' has no property '%s'") % (cls, propname)) |
tests/test-check-code.t
@@ -1,68 +1,53 @@
1 | #require test-repo |
|
1 | #require test-repo | |
2 |
|
2 | |||
3 | $ . "$TESTDIR/helpers-testrepo.sh" |
|
3 | $ . "$TESTDIR/helpers-testrepo.sh" | |
4 | $ check_code="$TESTDIR"/../contrib/check-code.py |
|
4 | $ check_code="$TESTDIR"/../contrib/check-code.py | |
5 | $ cd "$TESTDIR"/.. |
|
5 | $ cd "$TESTDIR"/.. | |
6 |
|
6 | |||
7 | New errors are not allowed. Warnings are strongly discouraged. |
|
7 | New errors are not allowed. Warnings are strongly discouraged. | |
8 | (The string "no-che?k-code" is spelled that way so this file is not itself skipped when checking.) |

8 | (The string "no-che?k-code" is spelled that way so this file is not itself skipped when checking.) | |
9 |
|
9 | |||
10 | $ hg locate -X contrib/python-zstandard -X hgext/fsmonitor/pywatchman | |
|
10 | $ hg locate -X contrib/python-zstandard -X hgext/fsmonitor/pywatchman | | |
11 | > sed 's-\\-/-g' | xargs "$check_code" --warnings --per-file=0 || false |
|
11 | > sed 's-\\-/-g' | xargs "$check_code" --warnings --per-file=0 || false | |
12 | hgext/fsmonitor/__init__.py:295: |
|
|||
13 | > switch_slashes = os.sep == '\\' |
|
|||
14 | use pycompat.ossep instead (py3) |
|
|||
15 | hgext/fsmonitor/__init__.py:395: |
|
|||
16 | > if 'FSMONITOR_LOG_FILE' in os.environ: |
|
|||
17 | use encoding.environ instead (py3) |
|
|||
18 | hgext/fsmonitor/__init__.py:396: |
|
|||
19 | > fn = os.environ['FSMONITOR_LOG_FILE'] |
|
|||
20 | use encoding.environ instead (py3) |
|
|||
21 | hgext/fsmonitor/__init__.py:437: |
|
|||
22 | > 'HG_PENDING' not in os.environ) |
|
|||
23 | use encoding.environ instead (py3) |
|
|||
24 | hgext/fsmonitor/__init__.py:548: |
|
|||
25 | > if sys.platform == 'darwin': |
|
|||
26 | use pycompat.sysplatform instead (py3) |
|
|||
27 | Skipping i18n/polib.py it has no-che?k-code (glob) |
|
12 | Skipping i18n/polib.py it has no-che?k-code (glob) | |
28 | mercurial/demandimport.py:309: |
|
13 | mercurial/demandimport.py:309: | |
29 | > if os.environ.get('HGDEMANDIMPORT') != 'disable': |
|
14 | > if os.environ.get('HGDEMANDIMPORT') != 'disable': | |
30 | use encoding.environ instead (py3) |
|
15 | use encoding.environ instead (py3) | |
31 | mercurial/encoding.py:54: |
|
16 | mercurial/encoding.py:54: | |
32 | > environ = os.environ |
|
17 | > environ = os.environ | |
33 | use encoding.environ instead (py3) |
|
18 | use encoding.environ instead (py3) | |
34 | mercurial/encoding.py:56: |
|
19 | mercurial/encoding.py:56: | |
35 | > environ = os.environb |
|
20 | > environ = os.environb | |
36 | use encoding.environ instead (py3) |
|
21 | use encoding.environ instead (py3) | |
37 | mercurial/encoding.py:61: |
|
22 | mercurial/encoding.py:61: | |
38 | > for k, v in os.environ.items()) |
|
23 | > for k, v in os.environ.items()) | |
39 | use encoding.environ instead (py3) |
|
24 | use encoding.environ instead (py3) | |
40 | mercurial/encoding.py:203: |
|
25 | mercurial/encoding.py:203: | |
41 | > for k, v in os.environ.items()) |
|
26 | > for k, v in os.environ.items()) | |
42 | use encoding.environ instead (py3) |
|
27 | use encoding.environ instead (py3) | |
43 | Skipping mercurial/httpclient/__init__.py it has no-che?k-code (glob) |
|
28 | Skipping mercurial/httpclient/__init__.py it has no-che?k-code (glob) | |
44 | Skipping mercurial/httpclient/_readers.py it has no-che?k-code (glob) |
|
29 | Skipping mercurial/httpclient/_readers.py it has no-che?k-code (glob) | |
45 | mercurial/policy.py:45: |
|
30 | mercurial/policy.py:45: | |
46 | > policy = os.environ.get('HGMODULEPOLICY', policy) |
|
31 | > policy = os.environ.get('HGMODULEPOLICY', policy) | |
47 | use encoding.environ instead (py3) |
|
32 | use encoding.environ instead (py3) | |
48 | Skipping mercurial/statprof.py it has no-che?k-code (glob) |
|
33 | Skipping mercurial/statprof.py it has no-che?k-code (glob) | |
49 | mercurial/win32.py:443: |
|
34 | mercurial/win32.py:443: | |
50 | > env, os.getcwd(), ctypes.byref(si), ctypes.byref(pi)) |
|
35 | > env, os.getcwd(), ctypes.byref(si), ctypes.byref(pi)) | |
51 | use pycompat.getcwd instead (py3) |
|
36 | use pycompat.getcwd instead (py3) | |
52 | [1] |
|
37 | [1] | |
53 |
|
38 | |||
54 | @commands in debugcommands.py should be in alphabetical order. |
|
39 | @commands in debugcommands.py should be in alphabetical order. | |
55 |
|
40 | |||
56 | >>> import re |
|
41 | >>> import re | |
57 | >>> commands = [] |
|
42 | >>> commands = [] | |
58 | >>> with open('mercurial/debugcommands.py', 'rb') as fh: |
|
43 | >>> with open('mercurial/debugcommands.py', 'rb') as fh: | |
59 | ... for line in fh: |
|
44 | ... for line in fh: | |
60 | ... m = re.match("^@command\('([a-z]+)", line) |
|
45 | ... m = re.match("^@command\('([a-z]+)", line) | |
61 | ... if m: |
|
46 | ... if m: | |
62 | ... commands.append(m.group(1)) |
|
47 | ... commands.append(m.group(1)) | |
63 | >>> scommands = list(sorted(commands)) |
|
48 | >>> scommands = list(sorted(commands)) | |
64 | >>> for i, command in enumerate(scommands): |
|
49 | >>> for i, command in enumerate(scommands): | |
65 | ... if command != commands[i]: |
|
50 | ... if command != commands[i]: | |
66 | ... print('commands in debugcommands.py not sorted; first differing ' |
|
51 | ... print('commands in debugcommands.py not sorted; first differing ' | |
67 | ... 'command is %s; expected %s' % (commands[i], command)) |
|
52 | ... 'command is %s; expected %s' % (commands[i], command)) | |
68 | ... break |
|
53 | ... break |