match: make explicitdir and traversedir None by default...
Author: Siddharth Agarwal
Changeset: r19143:3cb94685 (default branch)
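This changeset turns match.explicitdir and match.traversedir from no-op methods into attributes that default to None, so callers such as dirstate.walk() and the inotify client only invoke them when a callback has actually been installed. The sketch below illustrates the resulting calling convention; matcher, walk and the directory list are hypothetical stand-ins for illustration, not the real Mercurial API surface.

class matcher(object):
    # Default to None so the hot walk loop can skip the call entirely;
    # anyone who needs directory notifications assigns a callable.
    explicitdir = None
    traversedir = None

def walk(m, dirs):
    """Visit dirs, calling m.traversedir only when a callback is installed."""
    matchtdir = m.traversedir      # local alias, as dirstate.walk() does
    for d in dirs:
        if matchtdir:              # guard: the attribute may be None
            matchtdir(d)

seen = []
m = matcher()
m.traversedir = seen.append        # opt in to directory callbacks
walk(m, ['a', 'a/b'])
assert seen == ['a', 'a/b']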
hgext/inotify/client.py
@@ -1,172 +1,173 @@
1 1 # client.py - inotify status client
2 2 #
3 3 # Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
4 4 # Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
5 5 # Copyright 2009 Nicolas Dumazet <nicdumz@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 from mercurial.i18n import _
11 11 import common, server
12 12 import errno, os, socket, struct
13 13
14 14 class QueryFailed(Exception):
15 15 pass
16 16
17 17 def start_server(function):
18 18 """
19 19 Decorator.
20 20 Try to call function; if it fails, try to (re)start the inotify server.
21 21 Raise QueryFailed if something went wrong
22 22 """
23 23 def decorated_function(self, *args):
24 24 try:
25 25 return function(self, *args)
26 26 except (OSError, socket.error), err:
27 27 autostart = self.ui.configbool('inotify', 'autostart', True)
28 28
29 29 if err.args[0] == errno.ECONNREFUSED:
30 30 self.ui.warn(_('inotify-client: found dead inotify server '
31 31 'socket; removing it\n'))
32 32 os.unlink(os.path.join(self.root, '.hg', 'inotify.sock'))
33 33 if err.args[0] in (errno.ECONNREFUSED, errno.ENOENT) and autostart:
34 34 try:
35 35 try:
36 36 server.start(self.ui, self.dirstate, self.root,
37 37 dict(daemon=True, daemon_pipefds=''))
38 38 except server.AlreadyStartedException, inst:
39 39 # another process may have started its own
40 40 # inotify server while this one was starting.
41 41 self.ui.debug(str(inst))
42 42 except Exception, inst:
43 43 self.ui.warn(_('inotify-client: could not start inotify '
44 44 'server: %s\n') % inst)
45 45 else:
46 46 try:
47 47 return function(self, *args)
48 48 except socket.error, err:
49 49 self.ui.warn(_('inotify-client: could not talk to new '
50 50 'inotify server: %s\n') % err.args[-1])
51 51 elif err.args[0] in (errno.ECONNREFUSED, errno.ENOENT):
52 52 # silently ignore normal errors if autostart is False
53 53 self.ui.debug('(inotify server not running)\n')
54 54 else:
55 55 self.ui.warn(_('inotify-client: failed to contact inotify '
56 56 'server: %s\n') % err.args[-1])
57 57
58 58 self.ui.traceback()
59 59 raise QueryFailed('inotify query failed')
60 60
61 61 return decorated_function
62 62
63 63
64 64 class client(object):
65 65 def __init__(self, ui, repo):
66 66 self.ui = ui
67 67 self.dirstate = repo.dirstate
68 68 self.root = repo.root
69 69 self.sock = socket.socket(socket.AF_UNIX)
70 70
71 71 def _connect(self):
72 72 sockpath = os.path.join(self.root, '.hg', 'inotify.sock')
73 73 try:
74 74 self.sock.connect(sockpath)
75 75 except socket.error, err:
76 76 if err.args[0] == "AF_UNIX path too long":
77 77 sockpath = os.readlink(sockpath)
78 78 self.sock.connect(sockpath)
79 79 else:
80 80 raise
81 81
82 82 def _send(self, type, data):
83 83 """Sends protocol version number, and the data"""
84 84 self.sock.sendall(chr(common.version) + type + data)
85 85
86 86 self.sock.shutdown(socket.SHUT_WR)
87 87
88 88 def _receive(self, type):
89 89 """
90 90 Read data, check version number, extract headers,
91 91 and return a tuple (data descriptor, header)
92 92 Raises QueryFailed on error
93 93 """
94 94 cs = common.recvcs(self.sock)
95 95 try:
96 96 version = ord(cs.read(1))
97 97 except TypeError:
98 98 # empty answer, assume the server crashed
99 99 self.ui.warn(_('inotify-client: received empty answer from inotify '
100 100 'server'))
101 101 raise QueryFailed('server crashed')
102 102
103 103 if version != common.version:
104 104 self.ui.warn(_('(inotify: received response from incompatible '
105 105 'server version %d)\n') % version)
106 106 raise QueryFailed('incompatible server version')
107 107
108 108 readtype = cs.read(4)
109 109 if readtype != type:
110 110 self.ui.warn(_('(inotify: received \'%s\' response when expecting'
111 111 ' \'%s\')\n') % (readtype, type))
112 112 raise QueryFailed('wrong response type')
113 113
114 114 hdrfmt = common.resphdrfmts[type]
115 115 hdrsize = common.resphdrsizes[type]
116 116 try:
117 117 resphdr = struct.unpack(hdrfmt, cs.read(hdrsize))
118 118 except struct.error:
119 119 raise QueryFailed('unable to retrieve query response headers')
120 120
121 121 return cs, resphdr
122 122
123 123 def query(self, type, req):
124 124 self._connect()
125 125
126 126 self._send(type, req)
127 127
128 128 return self._receive(type)
129 129
130 130 @start_server
131 131 def statusquery(self, names, match, ignored, clean, unknown=True):
132 132
133 133 def genquery():
134 134 for n in names:
135 135 yield n
136 136 states = 'almrx!'
137 137 if ignored:
138 138 raise ValueError('this is insanity')
139 139 if clean:
140 140 states += 'c'
141 141 if unknown:
142 142 states += '?'
143 143 yield states
144 144
145 145 req = '\0'.join(genquery())
146 146
147 147 cs, resphdr = self.query('STAT', req)
148 148
149 149 def readnames(nbytes):
150 150 if nbytes:
151 151 names = cs.read(nbytes)
152 152 if names:
153 153 return filter(match, names.split('\0'))
154 154 return []
155 155 results = tuple(map(readnames, resphdr[:-1]))
156 156
157 157 if names:
158 158 nbytes = resphdr[-1]
159 159 vdirs = cs.read(nbytes)
160 160 if vdirs:
161 161 for vdir in vdirs.split('\0'):
162 match.explicitdir(vdir)
162 if match.explicitdir:
163 match.explicitdir(vdir)
163 164
164 165 return results
165 166
166 167 @start_server
167 168 def debugquery(self):
168 169 cs, resphdr = self.query('DBUG', '')
169 170
170 171 nbytes = resphdr[0]
171 172 names = cs.read(nbytes)
172 173 return names.split('\0')
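For reference, statusquery()'s genquery() above encodes the request as a NUL-separated list of file names followed by a string of state letters. A small self-contained sketch of that encoding; buildstatusreq is a hypothetical helper written for illustration, not part of the extension:

def buildstatusreq(names, ignored=False, clean=False, unknown=True):
    # Mirrors client.statusquery()'s genquery(): the names come first,
    # then the requested state letters, all joined with NUL bytes.
    if ignored:
        # the original client raises here as well; ignored listing is unsupported
        raise ValueError('ignored listing not supported')
    states = 'almrx!'
    if clean:
        states += 'c'
    if unknown:
        states += '?'
    return '\0'.join(list(names) + [states])

assert buildstatusreq(['foo.c', 'sub/bar.c'], clean=True) == \
    'foo.c\x00sub/bar.c\x00almrx!c?'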
mercurial/dirstate.py
@@ -1,817 +1,820 @@
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 import errno
8 8
9 9 from node import nullid
10 10 from i18n import _
11 11 import scmutil, util, ignore, osutil, parsers, encoding
12 12 import os, stat, errno, gc
13 13
14 14 propertycache = util.propertycache
15 15 filecache = scmutil.filecache
16 16 _rangemask = 0x7fffffff
17 17
18 18 class repocache(filecache):
19 19 """filecache for files in .hg/"""
20 20 def join(self, obj, fname):
21 21 return obj._opener.join(fname)
22 22
23 23 class rootcache(filecache):
24 24 """filecache for files in the repository root"""
25 25 def join(self, obj, fname):
26 26 return obj._join(fname)
27 27
28 28 class dirstate(object):
29 29
30 30 def __init__(self, opener, ui, root, validate):
31 31 '''Create a new dirstate object.
32 32
33 33 opener is an open()-like callable that can be used to open the
34 34 dirstate file; root is the root of the directory tracked by
35 35 the dirstate.
36 36 '''
37 37 self._opener = opener
38 38 self._validate = validate
39 39 self._root = root
40 40 self._rootdir = os.path.join(root, '')
41 41 self._dirty = False
42 42 self._dirtypl = False
43 43 self._lastnormaltime = 0
44 44 self._ui = ui
45 45 self._filecache = {}
46 46
47 47 @propertycache
48 48 def _map(self):
49 49 '''Return the dirstate contents as a map from filename to
50 50 (state, mode, size, time).'''
51 51 self._read()
52 52 return self._map
53 53
54 54 @propertycache
55 55 def _copymap(self):
56 56 self._read()
57 57 return self._copymap
58 58
59 59 @propertycache
60 60 def _foldmap(self):
61 61 f = {}
62 62 for name, s in self._map.iteritems():
63 63 if s[0] != 'r':
64 64 f[util.normcase(name)] = name
65 65 for name in self._dirs:
66 66 f[util.normcase(name)] = name
67 67 f['.'] = '.' # prevents useless util.fspath() invocation
68 68 return f
69 69
70 70 @repocache('branch')
71 71 def _branch(self):
72 72 try:
73 73 return self._opener.read("branch").strip() or "default"
74 74 except IOError, inst:
75 75 if inst.errno != errno.ENOENT:
76 76 raise
77 77 return "default"
78 78
79 79 @propertycache
80 80 def _pl(self):
81 81 try:
82 82 fp = self._opener("dirstate")
83 83 st = fp.read(40)
84 84 fp.close()
85 85 l = len(st)
86 86 if l == 40:
87 87 return st[:20], st[20:40]
88 88 elif l > 0 and l < 40:
89 89 raise util.Abort(_('working directory state appears damaged!'))
90 90 except IOError, err:
91 91 if err.errno != errno.ENOENT:
92 92 raise
93 93 return [nullid, nullid]
94 94
95 95 @propertycache
96 96 def _dirs(self):
97 97 return scmutil.dirs(self._map, 'r')
98 98
99 99 def dirs(self):
100 100 return self._dirs
101 101
102 102 @rootcache('.hgignore')
103 103 def _ignore(self):
104 104 files = [self._join('.hgignore')]
105 105 for name, path in self._ui.configitems("ui"):
106 106 if name == 'ignore' or name.startswith('ignore.'):
107 107 files.append(util.expandpath(path))
108 108 return ignore.ignore(self._root, files, self._ui.warn)
109 109
110 110 @propertycache
111 111 def _slash(self):
112 112 return self._ui.configbool('ui', 'slash') and os.sep != '/'
113 113
114 114 @propertycache
115 115 def _checklink(self):
116 116 return util.checklink(self._root)
117 117
118 118 @propertycache
119 119 def _checkexec(self):
120 120 return util.checkexec(self._root)
121 121
122 122 @propertycache
123 123 def _checkcase(self):
124 124 return not util.checkcase(self._join('.hg'))
125 125
126 126 def _join(self, f):
127 127 # much faster than os.path.join()
128 128 # it's safe because f is always a relative path
129 129 return self._rootdir + f
130 130
131 131 def flagfunc(self, buildfallback):
132 132 if self._checklink and self._checkexec:
133 133 def f(x):
134 134 try:
135 135 st = os.lstat(self._join(x))
136 136 if util.statislink(st):
137 137 return 'l'
138 138 if util.statisexec(st):
139 139 return 'x'
140 140 except OSError:
141 141 pass
142 142 return ''
143 143 return f
144 144
145 145 fallback = buildfallback()
146 146 if self._checklink:
147 147 def f(x):
148 148 if os.path.islink(self._join(x)):
149 149 return 'l'
150 150 if 'x' in fallback(x):
151 151 return 'x'
152 152 return ''
153 153 return f
154 154 if self._checkexec:
155 155 def f(x):
156 156 if 'l' in fallback(x):
157 157 return 'l'
158 158 if util.isexec(self._join(x)):
159 159 return 'x'
160 160 return ''
161 161 return f
162 162 else:
163 163 return fallback
164 164
165 165 def getcwd(self):
166 166 cwd = os.getcwd()
167 167 if cwd == self._root:
168 168 return ''
169 169 # self._root ends with a path separator if self._root is '/' or 'C:\'
170 170 rootsep = self._root
171 171 if not util.endswithsep(rootsep):
172 172 rootsep += os.sep
173 173 if cwd.startswith(rootsep):
174 174 return cwd[len(rootsep):]
175 175 else:
176 176 # we're outside the repo. return an absolute path.
177 177 return cwd
178 178
179 179 def pathto(self, f, cwd=None):
180 180 if cwd is None:
181 181 cwd = self.getcwd()
182 182 path = util.pathto(self._root, cwd, f)
183 183 if self._slash:
184 184 return util.normpath(path)
185 185 return path
186 186
187 187 def __getitem__(self, key):
188 188 '''Return the current state of key (a filename) in the dirstate.
189 189
190 190 States are:
191 191 n normal
192 192 m needs merging
193 193 r marked for removal
194 194 a marked for addition
195 195 ? not tracked
196 196 '''
197 197 return self._map.get(key, ("?",))[0]
198 198
199 199 def __contains__(self, key):
200 200 return key in self._map
201 201
202 202 def __iter__(self):
203 203 for x in sorted(self._map):
204 204 yield x
205 205
206 206 def iteritems(self):
207 207 return self._map.iteritems()
208 208
209 209 def parents(self):
210 210 return [self._validate(p) for p in self._pl]
211 211
212 212 def p1(self):
213 213 return self._validate(self._pl[0])
214 214
215 215 def p2(self):
216 216 return self._validate(self._pl[1])
217 217
218 218 def branch(self):
219 219 return encoding.tolocal(self._branch)
220 220
221 221 def setparents(self, p1, p2=nullid):
222 222 """Set dirstate parents to p1 and p2.
223 223
224 224 When moving from two parents to one, 'm' merged entries are
225 225 adjusted to normal, and previous copy records are discarded
226 226 and returned by the call.
227 227
228 228 See localrepo.setparents()
229 229 """
230 230 self._dirty = self._dirtypl = True
231 231 oldp2 = self._pl[1]
232 232 self._pl = p1, p2
233 233 copies = {}
234 234 if oldp2 != nullid and p2 == nullid:
235 235 # Discard 'm' markers when moving away from a merge state
236 236 for f, s in self._map.iteritems():
237 237 if s[0] == 'm':
238 238 if f in self._copymap:
239 239 copies[f] = self._copymap[f]
240 240 self.normallookup(f)
241 241 return copies
242 242
243 243 def setbranch(self, branch):
244 244 self._branch = encoding.fromlocal(branch)
245 245 f = self._opener('branch', 'w', atomictemp=True)
246 246 try:
247 247 f.write(self._branch + '\n')
248 248 f.close()
249 249
250 250 # make sure filecache has the correct stat info for _branch after
251 251 # replacing the underlying file
252 252 ce = self._filecache['_branch']
253 253 if ce:
254 254 ce.refresh()
255 255 except: # re-raises
256 256 f.discard()
257 257 raise
258 258
259 259 def _read(self):
260 260 self._map = {}
261 261 self._copymap = {}
262 262 try:
263 263 st = self._opener.read("dirstate")
264 264 except IOError, err:
265 265 if err.errno != errno.ENOENT:
266 266 raise
267 267 return
268 268 if not st:
269 269 return
270 270
271 271 # Python's garbage collector triggers a GC each time a certain number
272 272 # of container objects (the number being defined by
273 273 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
274 274 # for each file in the dirstate. The C version then immediately marks
275 275 # them as not to be tracked by the collector. However, this has no
276 276 # effect on when GCs are triggered, only on what objects the GC looks
277 277 # into. This means that O(number of files) GCs are unavoidable.
278 278 # Depending on when in the process's lifetime the dirstate is parsed,
279 279 # this can get very expensive. As a workaround, disable GC while
280 280 # parsing the dirstate.
281 281 gcenabled = gc.isenabled()
282 282 gc.disable()
283 283 try:
284 284 p = parsers.parse_dirstate(self._map, self._copymap, st)
285 285 finally:
286 286 if gcenabled:
287 287 gc.enable()
288 288 if not self._dirtypl:
289 289 self._pl = p
290 290
291 291 def invalidate(self):
292 292 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
293 293 "_ignore"):
294 294 if a in self.__dict__:
295 295 delattr(self, a)
296 296 self._lastnormaltime = 0
297 297 self._dirty = False
298 298
299 299 def copy(self, source, dest):
300 300 """Mark dest as a copy of source. Unmark dest if source is None."""
301 301 if source == dest:
302 302 return
303 303 self._dirty = True
304 304 if source is not None:
305 305 self._copymap[dest] = source
306 306 elif dest in self._copymap:
307 307 del self._copymap[dest]
308 308
309 309 def copied(self, file):
310 310 return self._copymap.get(file, None)
311 311
312 312 def copies(self):
313 313 return self._copymap
314 314
315 315 def _droppath(self, f):
316 316 if self[f] not in "?r" and "_dirs" in self.__dict__:
317 317 self._dirs.delpath(f)
318 318
319 319 def _addpath(self, f, state, mode, size, mtime):
320 320 oldstate = self[f]
321 321 if state == 'a' or oldstate == 'r':
322 322 scmutil.checkfilename(f)
323 323 if f in self._dirs:
324 324 raise util.Abort(_('directory %r already in dirstate') % f)
325 325 # shadows
326 326 for d in scmutil.finddirs(f):
327 327 if d in self._dirs:
328 328 break
329 329 if d in self._map and self[d] != 'r':
330 330 raise util.Abort(
331 331 _('file %r in dirstate clashes with %r') % (d, f))
332 332 if oldstate in "?r" and "_dirs" in self.__dict__:
333 333 self._dirs.addpath(f)
334 334 self._dirty = True
335 335 self._map[f] = (state, mode, size, mtime)
336 336
337 337 def normal(self, f):
338 338 '''Mark a file normal and clean.'''
339 339 s = os.lstat(self._join(f))
340 340 mtime = int(s.st_mtime)
341 341 self._addpath(f, 'n', s.st_mode,
342 342 s.st_size & _rangemask, mtime & _rangemask)
343 343 if f in self._copymap:
344 344 del self._copymap[f]
345 345 if mtime > self._lastnormaltime:
346 346 # Remember the most recent modification timeslot for status(),
347 347 # to make sure we won't miss future size-preserving file content
348 348 # modifications that happen within the same timeslot.
349 349 self._lastnormaltime = mtime
350 350
351 351 def normallookup(self, f):
352 352 '''Mark a file normal, but possibly dirty.'''
353 353 if self._pl[1] != nullid and f in self._map:
354 354 # if there is a merge going on and the file was either
355 355 # in state 'm' (-1) or coming from other parent (-2) before
356 356 # being removed, restore that state.
357 357 entry = self._map[f]
358 358 if entry[0] == 'r' and entry[2] in (-1, -2):
359 359 source = self._copymap.get(f)
360 360 if entry[2] == -1:
361 361 self.merge(f)
362 362 elif entry[2] == -2:
363 363 self.otherparent(f)
364 364 if source:
365 365 self.copy(source, f)
366 366 return
367 367 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
368 368 return
369 369 self._addpath(f, 'n', 0, -1, -1)
370 370 if f in self._copymap:
371 371 del self._copymap[f]
372 372
373 373 def otherparent(self, f):
374 374 '''Mark as coming from the other parent, always dirty.'''
375 375 if self._pl[1] == nullid:
376 376 raise util.Abort(_("setting %r to other parent "
377 377 "only allowed in merges") % f)
378 378 self._addpath(f, 'n', 0, -2, -1)
379 379 if f in self._copymap:
380 380 del self._copymap[f]
381 381
382 382 def add(self, f):
383 383 '''Mark a file added.'''
384 384 self._addpath(f, 'a', 0, -1, -1)
385 385 if f in self._copymap:
386 386 del self._copymap[f]
387 387
388 388 def remove(self, f):
389 389 '''Mark a file removed.'''
390 390 self._dirty = True
391 391 self._droppath(f)
392 392 size = 0
393 393 if self._pl[1] != nullid and f in self._map:
394 394 # backup the previous state
395 395 entry = self._map[f]
396 396 if entry[0] == 'm': # merge
397 397 size = -1
398 398 elif entry[0] == 'n' and entry[2] == -2: # other parent
399 399 size = -2
400 400 self._map[f] = ('r', 0, size, 0)
401 401 if size == 0 and f in self._copymap:
402 402 del self._copymap[f]
403 403
404 404 def merge(self, f):
405 405 '''Mark a file merged.'''
406 406 if self._pl[1] == nullid:
407 407 return self.normallookup(f)
408 408 s = os.lstat(self._join(f))
409 409 self._addpath(f, 'm', s.st_mode,
410 410 s.st_size & _rangemask, int(s.st_mtime) & _rangemask)
411 411 if f in self._copymap:
412 412 del self._copymap[f]
413 413
414 414 def drop(self, f):
415 415 '''Drop a file from the dirstate'''
416 416 if f in self._map:
417 417 self._dirty = True
418 418 self._droppath(f)
419 419 del self._map[f]
420 420
421 421 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
422 422 normed = util.normcase(path)
423 423 folded = self._foldmap.get(normed, None)
424 424 if folded is None:
425 425 if isknown:
426 426 folded = path
427 427 else:
428 428 if exists is None:
429 429 exists = os.path.lexists(os.path.join(self._root, path))
430 430 if not exists:
431 431 # Maybe a path component exists
432 432 if not ignoremissing and '/' in path:
433 433 d, f = path.rsplit('/', 1)
434 434 d = self._normalize(d, isknown, ignoremissing, None)
435 435 folded = d + "/" + f
436 436 else:
437 437 # No path components, preserve original case
438 438 folded = path
439 439 else:
440 440 # recursively normalize leading directory components
441 441 # against dirstate
442 442 if '/' in normed:
443 443 d, f = normed.rsplit('/', 1)
444 444 d = self._normalize(d, isknown, ignoremissing, True)
445 445 r = self._root + "/" + d
446 446 folded = d + "/" + util.fspath(f, r)
447 447 else:
448 448 folded = util.fspath(normed, self._root)
449 449 self._foldmap[normed] = folded
450 450
451 451 return folded
452 452
453 453 def normalize(self, path, isknown=False, ignoremissing=False):
454 454 '''
455 455 normalize the case of a pathname when on a casefolding filesystem
456 456
457 457 isknown specifies whether the filename came from walking the
458 458 disk, to avoid extra filesystem access.
459 459
460 460 If ignoremissing is True, missing paths are returned
461 461 unchanged. Otherwise, we try harder to normalize possibly
462 462 existing path components.
463 463
464 464 The normalized case is determined based on the following precedence:
465 465
466 466 - version of name already stored in the dirstate
467 467 - version of name stored on disk
468 468 - version provided via command arguments
469 469 '''
470 470
471 471 if self._checkcase:
472 472 return self._normalize(path, isknown, ignoremissing)
473 473 return path
474 474
475 475 def clear(self):
476 476 self._map = {}
477 477 if "_dirs" in self.__dict__:
478 478 delattr(self, "_dirs")
479 479 self._copymap = {}
480 480 self._pl = [nullid, nullid]
481 481 self._lastnormaltime = 0
482 482 self._dirty = True
483 483
484 484 def rebuild(self, parent, allfiles, changedfiles=None):
485 485 changedfiles = changedfiles or allfiles
486 486 oldmap = self._map
487 487 self.clear()
488 488 for f in allfiles:
489 489 if f not in changedfiles:
490 490 self._map[f] = oldmap[f]
491 491 else:
492 492 if 'x' in allfiles.flags(f):
493 493 self._map[f] = ('n', 0777, -1, 0)
494 494 else:
495 495 self._map[f] = ('n', 0666, -1, 0)
496 496 self._pl = (parent, nullid)
497 497 self._dirty = True
498 498
499 499 def write(self):
500 500 if not self._dirty:
501 501 return
502 502 st = self._opener("dirstate", "w", atomictemp=True)
503 503
504 504 def finish(s):
505 505 st.write(s)
506 506 st.close()
507 507 self._lastnormaltime = 0
508 508 self._dirty = self._dirtypl = False
509 509
510 510 # use the modification time of the newly created temporary file as the
511 511 # filesystem's notion of 'now'
512 512 now = util.fstat(st).st_mtime
513 513 finish(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
514 514
515 515 def _dirignore(self, f):
516 516 if f == '.':
517 517 return False
518 518 if self._ignore(f):
519 519 return True
520 520 for p in scmutil.finddirs(f):
521 521 if self._ignore(p):
522 522 return True
523 523 return False
524 524
525 525 def walk(self, match, subrepos, unknown, ignored):
526 526 '''
527 527 Walk recursively through the directory tree, finding all files
528 528 matched by match.
529 529
530 530 Return a dict mapping filename to stat-like object (either
531 531 mercurial.osutil.stat instance or return value of os.stat()).
532 532 '''
533 533
534 534 def fwarn(f, msg):
535 535 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
536 536 return False
537 537
538 538 def badtype(mode):
539 539 kind = _('unknown')
540 540 if stat.S_ISCHR(mode):
541 541 kind = _('character device')
542 542 elif stat.S_ISBLK(mode):
543 543 kind = _('block device')
544 544 elif stat.S_ISFIFO(mode):
545 545 kind = _('fifo')
546 546 elif stat.S_ISSOCK(mode):
547 547 kind = _('socket')
548 548 elif stat.S_ISDIR(mode):
549 549 kind = _('directory')
550 550 return _('unsupported file type (type is %s)') % kind
551 551
552 552 ignore = self._ignore
553 553 dirignore = self._dirignore
554 554 if ignored:
555 555 ignore = util.never
556 556 dirignore = util.never
557 557 elif not unknown:
558 558 # if unknown and ignored are False, skip step 2
559 559 ignore = util.always
560 560 dirignore = util.always
561 561
562 562 matchfn = match.matchfn
563 563 matchalways = match.always()
564 564 matchedir = match.explicitdir
565 565 matchtdir = match.traversedir
566 566 badfn = match.bad
567 567 dmap = self._map
568 568 normpath = util.normpath
569 569 listdir = osutil.listdir
570 570 lstat = os.lstat
571 571 getkind = stat.S_IFMT
572 572 dirkind = stat.S_IFDIR
573 573 regkind = stat.S_IFREG
574 574 lnkkind = stat.S_IFLNK
575 575 join = self._join
576 576 work = []
577 577 wadd = work.append
578 578
579 579 exact = skipstep3 = False
580 580 if matchfn == match.exact: # match.exact
581 581 exact = True
582 582 dirignore = util.always # skip step 2
583 583 elif match.files() and not match.anypats(): # match.match, no patterns
584 584 skipstep3 = True
585 585
586 586 if not exact and self._checkcase:
587 587 normalize = self._normalize
588 588 skipstep3 = False
589 589 else:
590 590 normalize = None
591 591
592 592 files = sorted(match.files())
593 593 subrepos.sort()
594 594 i, j = 0, 0
595 595 while i < len(files) and j < len(subrepos):
596 596 subpath = subrepos[j] + "/"
597 597 if files[i] < subpath:
598 598 i += 1
599 599 continue
600 600 while i < len(files) and files[i].startswith(subpath):
601 601 del files[i]
602 602 j += 1
603 603
604 604 if not files or '.' in files:
605 605 files = ['']
606 606 results = dict.fromkeys(subrepos)
607 607 results['.hg'] = None
608 608
609 609 # step 1: find all explicit files
610 610 for ff in files:
611 611 if normalize:
612 612 nf = normalize(normpath(ff), False, True)
613 613 else:
614 614 nf = normpath(ff)
615 615 if nf in results:
616 616 continue
617 617
618 618 try:
619 619 st = lstat(join(nf))
620 620 kind = getkind(st.st_mode)
621 621 if kind == dirkind:
622 622 skipstep3 = False
623 623 if nf in dmap:
624 624 #file deleted on disk but still in dirstate
625 625 results[nf] = None
626 matchedir(nf)
626 if matchedir:
627 matchedir(nf)
627 628 if not dirignore(nf):
628 629 wadd(nf)
629 630 elif kind == regkind or kind == lnkkind:
630 631 results[nf] = st
631 632 else:
632 633 badfn(ff, badtype(kind))
633 634 if nf in dmap:
634 635 results[nf] = None
635 636 except OSError, inst:
636 637 if nf in dmap: # does it exactly match a file?
637 638 results[nf] = None
638 639 else: # does it match a directory?
639 640 prefix = nf + "/"
640 641 for fn in dmap:
641 642 if fn.startswith(prefix):
642 matchedir(nf)
643 if matchedir:
644 matchedir(nf)
643 645 skipstep3 = False
644 646 break
645 647 else:
646 648 badfn(ff, inst.strerror)
647 649
648 650 # step 2: visit subdirectories
649 651 while work:
650 652 nd = work.pop()
651 653 skip = None
652 654 if nd == '.':
653 655 nd = ''
654 656 else:
655 657 skip = '.hg'
656 658 try:
657 659 entries = listdir(join(nd), stat=True, skip=skip)
658 660 except OSError, inst:
659 661 if inst.errno in (errno.EACCES, errno.ENOENT):
660 662 fwarn(nd, inst.strerror)
661 663 continue
662 664 raise
663 665 for f, kind, st in entries:
664 666 if normalize:
665 667 nf = normalize(nd and (nd + "/" + f) or f, True, True)
666 668 else:
667 669 nf = nd and (nd + "/" + f) or f
668 670 if nf not in results:
669 671 if kind == dirkind:
670 672 if not ignore(nf):
671 matchtdir(nf)
673 if matchtdir:
674 matchtdir(nf)
672 675 wadd(nf)
673 676 if nf in dmap and (matchalways or matchfn(nf)):
674 677 results[nf] = None
675 678 elif kind == regkind or kind == lnkkind:
676 679 if nf in dmap:
677 680 if matchalways or matchfn(nf):
678 681 results[nf] = st
679 682 elif (matchalways or matchfn(nf)) and not ignore(nf):
680 683 results[nf] = st
681 684 elif nf in dmap and (matchalways or matchfn(nf)):
682 685 results[nf] = None
683 686
684 687 for s in subrepos:
685 688 del results[s]
686 689 del results['.hg']
687 690
688 691 # step 3: report unseen items in the dmap hash
689 692 if not skipstep3 and not exact:
690 693 if not results and matchalways:
691 694 visit = dmap.keys()
692 695 else:
693 696 visit = [f for f in dmap if f not in results and matchfn(f)]
694 697 visit.sort()
695 698
696 699 if unknown:
697 700 # unknown == True means we walked the full directory tree above.
698 701 # So if a file is not seen it was either a) not matching matchfn
699 702 # b) ignored, c) missing, or d) under a symlink directory.
700 703 audit_path = scmutil.pathauditor(self._root)
701 704
702 705 for nf in iter(visit):
703 706 # Report ignored items in the dmap as long as they are not
704 707 # under a symlink directory.
705 708 if audit_path.check(nf):
706 709 try:
707 710 results[nf] = lstat(join(nf))
708 711 except OSError:
709 712 # file doesn't exist
710 713 results[nf] = None
711 714 else:
712 715 # It's either missing or under a symlink directory
713 716 results[nf] = None
714 717 else:
715 718 # We may not have walked the full directory tree above,
716 719 # so stat everything we missed.
717 720 nf = iter(visit).next
718 721 for st in util.statfiles([join(i) for i in visit]):
719 722 results[nf()] = st
720 723 return results
721 724
722 725 def status(self, match, subrepos, ignored, clean, unknown):
723 726 '''Determine the status of the working copy relative to the
724 727 dirstate and return a tuple of lists (unsure, modified, added,
725 728 removed, deleted, unknown, ignored, clean), where:
726 729
727 730 unsure:
728 731 files that might have been modified since the dirstate was
729 732 written, but need to be read to be sure (size is the same
730 733 but mtime differs)
731 734 modified:
732 735 files that have definitely been modified since the dirstate
733 736 was written (different size or mode)
734 737 added:
735 738 files that have been explicitly added with hg add
736 739 removed:
737 740 files that have been explicitly removed with hg remove
738 741 deleted:
739 742 files that have been deleted through other means ("missing")
740 743 unknown:
741 744 files not in the dirstate that are not ignored
742 745 ignored:
743 746 files not in the dirstate that are ignored
744 747 (by _dirignore())
745 748 clean:
746 749 files that have definitely not been modified since the
747 750 dirstate was written
748 751 '''
749 752 listignored, listclean, listunknown = ignored, clean, unknown
750 753 lookup, modified, added, unknown, ignored = [], [], [], [], []
751 754 removed, deleted, clean = [], [], []
752 755
753 756 dmap = self._map
754 757 ladd = lookup.append # aka "unsure"
755 758 madd = modified.append
756 759 aadd = added.append
757 760 uadd = unknown.append
758 761 iadd = ignored.append
759 762 radd = removed.append
760 763 dadd = deleted.append
761 764 cadd = clean.append
762 765 mexact = match.exact
763 766 dirignore = self._dirignore
764 767 checkexec = self._checkexec
765 768 checklink = self._checklink
766 769 copymap = self._copymap
767 770 lastnormaltime = self._lastnormaltime
768 771
769 772 lnkkind = stat.S_IFLNK
770 773
771 774 for fn, st in self.walk(match, subrepos, listunknown,
772 775 listignored).iteritems():
773 776 if fn not in dmap:
774 777 if (listignored or mexact(fn)) and dirignore(fn):
775 778 if listignored:
776 779 iadd(fn)
777 780 elif listunknown:
778 781 uadd(fn)
779 782 continue
780 783
781 784 state, mode, size, time = dmap[fn]
782 785
783 786 if not st and state in "nma":
784 787 dadd(fn)
785 788 elif state == 'n':
786 789 # The "mode & lnkkind != lnkkind or self._checklink"
787 790 # lines are an expansion of "islink => checklink"
788 791 # where islink means "is this a link?" and checklink
789 792 # means "can we check links?".
790 793 mtime = int(st.st_mtime)
791 794 if (size >= 0 and
792 795 ((size != st.st_size and size != st.st_size & _rangemask)
793 796 or ((mode ^ st.st_mode) & 0100 and checkexec))
794 797 and (mode & lnkkind != lnkkind or checklink)
795 798 or size == -2 # other parent
796 799 or fn in copymap):
797 800 madd(fn)
798 801 elif ((time != mtime and time != mtime & _rangemask)
799 802 and (mode & lnkkind != lnkkind or checklink)):
800 803 ladd(fn)
801 804 elif mtime == lastnormaltime:
802 805 # fn may have been changed in the same timeslot without
803 806 # changing its size. This can happen if we quickly do
804 807 # multiple commits in a single transaction.
805 808 # Force lookup, so we don't miss such a racy file change.
806 809 ladd(fn)
807 810 elif listclean:
808 811 cadd(fn)
809 812 elif state == 'm':
810 813 madd(fn)
811 814 elif state == 'a':
812 815 aadd(fn)
813 816 elif state == 'r':
814 817 radd(fn)
815 818
816 819 return (lookup, modified, added, removed, deleted, unknown, ignored,
817 820 clean)
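One detail of dirstate._read() above that is easy to miss: the comment block explains why Python's cyclic GC is disabled while parse_dirstate runs, since allocating one tuple per tracked file would otherwise trigger O(number of files) collections. A generic sketch of that guard, with parse standing in for parsers.parse_dirstate:

import gc

def parse_without_gc(parse, *args):
    # Disable the cyclic collector for the duration of the parse and
    # restore the previous state afterwards, as dirstate._read() does.
    gcenabled = gc.isenabled()
    gc.disable()
    try:
        return parse(*args)
    finally:
        if gcenabled:
            gc.enable()

# Example with a trivial stand-in parser:
result = parse_without_gc(lambda data: data.split('\0'), 'a\x00b')
assert result == ['a', 'b']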
mercurial/match.py
@@ -1,356 +1,354 @@
1 1 # match.py - filename matching
2 2 #
3 3 # Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import scmutil, util, fileset
10 10 from i18n import _
11 11
12 12 def _rematcher(pat):
13 13 m = util.compilere(pat)
14 14 try:
15 15 # slightly faster, provided by facebook's re2 bindings
16 16 return m.test_match
17 17 except AttributeError:
18 18 return m.match
19 19
20 20 def _expandsets(pats, ctx):
21 21 '''convert set: patterns into a list of files in the given context'''
22 22 fset = set()
23 23 other = []
24 24
25 25 for kind, expr in pats:
26 26 if kind == 'set':
27 27 if not ctx:
28 28 raise util.Abort("fileset expression with no context")
29 29 s = fileset.getfileset(ctx, expr)
30 30 fset.update(s)
31 31 continue
32 32 other.append((kind, expr))
33 33 return fset, other
34 34
35 35 class match(object):
36 36 def __init__(self, root, cwd, patterns, include=[], exclude=[],
37 37 default='glob', exact=False, auditor=None, ctx=None):
38 38 """build an object to match a set of file patterns
39 39
40 40 arguments:
41 41 root - the canonical root of the tree you're matching against
42 42 cwd - the current working directory, if relevant
43 43 patterns - patterns to find
44 44 include - patterns to include
45 45 exclude - patterns to exclude
46 46 default - if a pattern in names has no explicit type, assume this one
47 47 exact - patterns are actually literals
48 48
49 49 a pattern is one of:
50 50 'glob:<glob>' - a glob relative to cwd
51 51 're:<regexp>' - a regular expression
52 52 'path:<path>' - a path relative to repository root
53 53 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
54 54 'relpath:<path>' - a path relative to cwd
55 55 'relre:<regexp>' - a regexp that needn't match the start of a name
56 56 'set:<fileset>' - a fileset expression
57 57 '<something>' - a pattern of the specified default type
58 58 """
59 59
60 60 self._root = root
61 61 self._cwd = cwd
62 62 self._files = []
63 63 self._anypats = bool(include or exclude)
64 64 self._ctx = ctx
65 65 self._always = False
66 66
67 67 if include:
68 68 pats = _normalize(include, 'glob', root, cwd, auditor)
69 69 self.includepat, im = _buildmatch(ctx, pats, '(?:/|$)')
70 70 if exclude:
71 71 pats = _normalize(exclude, 'glob', root, cwd, auditor)
72 72 self.excludepat, em = _buildmatch(ctx, pats, '(?:/|$)')
73 73 if exact:
74 74 if isinstance(patterns, list):
75 75 self._files = patterns
76 76 else:
77 77 self._files = list(patterns)
78 78 pm = self.exact
79 79 elif patterns:
80 80 pats = _normalize(patterns, default, root, cwd, auditor)
81 81 self._files = _roots(pats)
82 82 self._anypats = self._anypats or _anypats(pats)
83 83 self.patternspat, pm = _buildmatch(ctx, pats, '$')
84 84
85 85 if patterns or exact:
86 86 if include:
87 87 if exclude:
88 88 m = lambda f: im(f) and not em(f) and pm(f)
89 89 else:
90 90 m = lambda f: im(f) and pm(f)
91 91 else:
92 92 if exclude:
93 93 m = lambda f: not em(f) and pm(f)
94 94 else:
95 95 m = pm
96 96 else:
97 97 if include:
98 98 if exclude:
99 99 m = lambda f: im(f) and not em(f)
100 100 else:
101 101 m = im
102 102 else:
103 103 if exclude:
104 104 m = lambda f: not em(f)
105 105 else:
106 106 m = lambda f: True
107 107 self._always = True
108 108
109 109 self.matchfn = m
110 110 self._fmap = set(self._files)
111 111
112 112 def __call__(self, fn):
113 113 return self.matchfn(fn)
114 114 def __iter__(self):
115 115 for f in self._files:
116 116 yield f
117 117 def bad(self, f, msg):
118 118 '''callback for each explicit file that can't be
119 119 found/accessed, with an error message
120 120 '''
121 121 pass
122 def explicitdir(self, f):
123 pass
124 def traversedir(self, f):
125 pass
122 explicitdir = None
123 traversedir = None
126 124 def missing(self, f):
127 125 pass
128 126 def exact(self, f):
129 127 return f in self._fmap
130 128 def rel(self, f):
131 129 return util.pathto(self._root, self._cwd, f)
132 130 def files(self):
133 131 return self._files
134 132 def anypats(self):
135 133 return self._anypats
136 134 def always(self):
137 135 return self._always
138 136
139 137 class exact(match):
140 138 def __init__(self, root, cwd, files):
141 139 match.__init__(self, root, cwd, files, exact = True)
142 140
143 141 class always(match):
144 142 def __init__(self, root, cwd):
145 143 match.__init__(self, root, cwd, [])
146 144 self._always = True
147 145
148 146 class narrowmatcher(match):
149 147 """Adapt a matcher to work on a subdirectory only.
150 148
151 149 The paths are remapped to remove/insert the path as needed:
152 150
153 151 >>> m1 = match('root', '', ['a.txt', 'sub/b.txt'])
154 152 >>> m2 = narrowmatcher('sub', m1)
155 153 >>> bool(m2('a.txt'))
156 154 False
157 155 >>> bool(m2('b.txt'))
158 156 True
159 157 >>> bool(m2.matchfn('a.txt'))
160 158 False
161 159 >>> bool(m2.matchfn('b.txt'))
162 160 True
163 161 >>> m2.files()
164 162 ['b.txt']
165 163 >>> m2.exact('b.txt')
166 164 True
167 165 >>> m2.rel('b.txt')
168 166 'b.txt'
169 167 >>> def bad(f, msg):
170 168 ... print "%s: %s" % (f, msg)
171 169 >>> m1.bad = bad
172 170 >>> m2.bad('x.txt', 'No such file')
173 171 sub/x.txt: No such file
174 172 """
175 173
176 174 def __init__(self, path, matcher):
177 175 self._root = matcher._root
178 176 self._cwd = matcher._cwd
179 177 self._path = path
180 178 self._matcher = matcher
181 179 self._always = matcher._always
182 180
183 181 self._files = [f[len(path) + 1:] for f in matcher._files
184 182 if f.startswith(path + "/")]
185 183 self._anypats = matcher._anypats
186 184 self.matchfn = lambda fn: matcher.matchfn(self._path + "/" + fn)
187 185 self._fmap = set(self._files)
188 186
189 187 def bad(self, f, msg):
190 188 self._matcher.bad(self._path + "/" + f, msg)
191 189
192 190 def patkind(pat):
193 191 return _patsplit(pat, None)[0]
194 192
195 193 def _patsplit(pat, default):
196 194 """Split a string into an optional pattern kind prefix and the
197 195 actual pattern."""
198 196 if ':' in pat:
199 197 kind, val = pat.split(':', 1)
200 198 if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
201 199 'listfile', 'listfile0', 'set'):
202 200 return kind, val
203 201 return default, pat
204 202
205 203 def _globre(pat):
206 204 "convert a glob pattern into a regexp"
207 205 i, n = 0, len(pat)
208 206 res = ''
209 207 group = 0
210 208 escape = re.escape
211 209 def peek():
212 210 return i < n and pat[i]
213 211 while i < n:
214 212 c = pat[i]
215 213 i += 1
216 214 if c not in '*?[{},\\':
217 215 res += escape(c)
218 216 elif c == '*':
219 217 if peek() == '*':
220 218 i += 1
221 219 res += '.*'
222 220 else:
223 221 res += '[^/]*'
224 222 elif c == '?':
225 223 res += '.'
226 224 elif c == '[':
227 225 j = i
228 226 if j < n and pat[j] in '!]':
229 227 j += 1
230 228 while j < n and pat[j] != ']':
231 229 j += 1
232 230 if j >= n:
233 231 res += '\\['
234 232 else:
235 233 stuff = pat[i:j].replace('\\','\\\\')
236 234 i = j + 1
237 235 if stuff[0] == '!':
238 236 stuff = '^' + stuff[1:]
239 237 elif stuff[0] == '^':
240 238 stuff = '\\' + stuff
241 239 res = '%s[%s]' % (res, stuff)
242 240 elif c == '{':
243 241 group += 1
244 242 res += '(?:'
245 243 elif c == '}' and group:
246 244 res += ')'
247 245 group -= 1
248 246 elif c == ',' and group:
249 247 res += '|'
250 248 elif c == '\\':
251 249 p = peek()
252 250 if p:
253 251 i += 1
254 252 res += escape(p)
255 253 else:
256 254 res += escape(c)
257 255 else:
258 256 res += escape(c)
259 257 return res
260 258
261 259 def _regex(kind, name, tail):
262 260 '''convert a pattern into a regular expression'''
263 261 if not name:
264 262 return ''
265 263 if kind == 're':
266 264 return name
267 265 elif kind == 'path':
268 266 return '^' + re.escape(name) + '(?:/|$)'
269 267 elif kind == 'relglob':
270 268 return '(?:|.*/)' + _globre(name) + tail
271 269 elif kind == 'relpath':
272 270 return re.escape(name) + '(?:/|$)'
273 271 elif kind == 'relre':
274 272 if name.startswith('^'):
275 273 return name
276 274 return '.*' + name
277 275 return _globre(name) + tail
278 276
279 277 def _buildmatch(ctx, pats, tail):
280 278 fset, pats = _expandsets(pats, ctx)
281 279 if not pats:
282 280 return "", fset.__contains__
283 281
284 282 pat, mf = _buildregexmatch(pats, tail)
285 283 if fset:
286 284 return pat, lambda f: f in fset or mf(f)
287 285 return pat, mf
288 286
289 287 def _buildregexmatch(pats, tail):
290 288 """build a matching function from a set of patterns"""
291 289 try:
292 290 pat = '(?:%s)' % '|'.join([_regex(k, p, tail) for (k, p) in pats])
293 291 if len(pat) > 20000:
294 292 raise OverflowError
295 293 return pat, _rematcher(pat)
296 294 except OverflowError:
297 295 # We're using a Python with a tiny regex engine and we
298 296 # made it explode, so we'll divide the pattern list in two
299 297 # until it works
300 298 l = len(pats)
301 299 if l < 2:
302 300 raise
303 301 pata, a = _buildregexmatch(pats[:l//2], tail)
304 302 patb, b = _buildregexmatch(pats[l//2:], tail)
305 303 return pat, lambda s: a(s) or b(s)
306 304 except re.error:
307 305 for k, p in pats:
308 306 try:
309 307 _rematcher('(?:%s)' % _regex(k, p, tail))
310 308 except re.error:
311 309 raise util.Abort(_("invalid pattern (%s): %s") % (k, p))
312 310 raise util.Abort(_("invalid pattern"))
313 311
314 312 def _normalize(names, default, root, cwd, auditor):
315 313 pats = []
316 314 for kind, name in [_patsplit(p, default) for p in names]:
317 315 if kind in ('glob', 'relpath'):
318 316 name = scmutil.canonpath(root, cwd, name, auditor)
319 317 elif kind in ('relglob', 'path'):
320 318 name = util.normpath(name)
321 319 elif kind in ('listfile', 'listfile0'):
322 320 try:
323 321 files = util.readfile(name)
324 322 if kind == 'listfile0':
325 323 files = files.split('\0')
326 324 else:
327 325 files = files.splitlines()
328 326 files = [f for f in files if f]
329 327 except EnvironmentError:
330 328 raise util.Abort(_("unable to read file list (%s)") % name)
331 329 pats += _normalize(files, default, root, cwd, auditor)
332 330 continue
333 331
334 332 pats.append((kind, name))
335 333 return pats
336 334
337 335 def _roots(patterns):
338 336 r = []
339 337 for kind, name in patterns:
340 338 if kind == 'glob': # find the non-glob prefix
341 339 root = []
342 340 for p in name.split('/'):
343 341 if '[' in p or '{' in p or '*' in p or '?' in p:
344 342 break
345 343 root.append(p)
346 344 r.append('/'.join(root) or '.')
347 345 elif kind in ('relpath', 'path'):
348 346 r.append(name or '.')
349 347 else: # relglob, re, relre
350 348 r.append('.')
351 349 return r
352 350
353 351 def _anypats(patterns):
354 352 for kind, name in patterns:
355 353 if kind in ('glob', 're', 'relglob', 'relre', 'set'):
356 354 return True
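The pattern kinds documented in match.__init__'s docstring are implemented by _patsplit(), _globre() and _regex() above. A few concrete conversions, written as assertions; they assume the mercurial package from this tree is importable, and the expected strings were derived by hand from the code shown, so other versions may differ:

from mercurial import match as matchmod  # assumes this source tree is on sys.path

# 'path:' patterns anchor at the repository root and match the whole subtree.
assert matchmod._regex('path', 'src', '$') == '^src(?:/|$)'
# 'glob:' patterns stay rooted and '*' stops at directory separators.
assert matchmod._regex('glob', '*.c', '$') == '[^/]*\\.c$'
# 'relglob:' additionally allows any leading directory prefix.
assert matchmod._regex('relglob', '*.c', '$') == '(?:|.*/)[^/]*\\.c$'
# _patsplit() separates the optional kind prefix from the pattern itself.
assert matchmod._patsplit('re:.*\\.py$', 'glob') == ('re', '.*\\.py$')
assert matchmod._patsplit('plain-name', 'glob') == ('glob', 'plain-name')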