##// END OF EJS Templates
vfs: add removedirs
FUJIWARA Katsunori -
r24693:0d28b0df default
parent child Browse files
Show More
@@ -1,1105 +1,1110 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from mercurial.node import nullrev
9 from mercurial.node import nullrev
10 import util, error, osutil, revset, similar, encoding, phases
10 import util, error, osutil, revset, similar, encoding, phases
11 import pathutil
11 import pathutil
12 import match as matchmod
12 import match as matchmod
13 import os, errno, re, glob, tempfile, shutil, stat
13 import os, errno, re, glob, tempfile, shutil, stat
14
14
15 if os.name == 'nt':
15 if os.name == 'nt':
16 import scmwindows as scmplatform
16 import scmwindows as scmplatform
17 else:
17 else:
18 import scmposix as scmplatform
18 import scmposix as scmplatform
19
19
20 systemrcpath = scmplatform.systemrcpath
20 systemrcpath = scmplatform.systemrcpath
21 userrcpath = scmplatform.userrcpath
21 userrcpath = scmplatform.userrcpath
22
22
class status(tuple):
    '''Named tuple with a list of files per status.

    The 'deleted', 'unknown' and 'ignored' properties are only
    relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        # store the seven lists positionally; properties below give
        # them readable names
        fields = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, fields)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                 'unknown=%r, ignored=%r, clean=%r>') % self)
75
75
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Map each subpath to the context it should be read from, giving
    # ctx1 priority.  The subpaths from ctx2 are important when the
    # .hgsub file has been modified (in ctx2) but not yet committed
    # (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
    # keys are unique, so iterating sorted keys matches sorting the
    # (subpath, ctx) pairs
    for subpath in sorted(subpaths):
        yield subpath, subpaths[subpath].sub(subpath)
85
85
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    for n in excluded or []:
        if n not in repo:
            # discovery should not have included the filtered revision,
            # we have to explicitly exclude it until discovery is cleanup.
            continue
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if not secretlist:
        ui.status(_("no changes found\n"))
    else:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
106
106
def checknewlabel(repo, lbl, kind):
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise util.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise util.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
    except ValueError:
        # not an integer: acceptable as a label name
        pass
    else:
        raise util.Abort(_("cannot use an integer as a name"))
120
120
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    for badchar in ('\r', '\n'):
        if badchar in f:
            raise util.Abort(
                _("'\\n' and '\\r' disallowed in filenames: %r") % f)
125
125
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise util.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
137
137
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lowered = val.lower()
    asbool = util.parsebool(val)
    # Windows always aborts on non-portable names
    abort = lowered == 'abort' or os.name == 'nt'
    warn = bool(asbool) or lowered == 'warn'
    if asbool is None and not (warn or abort or lowered == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
150
150
class casecollisionauditor(object):
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        self._dirstate = dirstate
        # lower-case every tracked filename in one pass
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        # _newfiles suppresses duplicate complaints when this object is
        # called twice with the same filename.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        lowered = encoding.lower(f)
        if lowered in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise util.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(lowered)
        self._newfiles.add(f)
174
174
175 class abstractvfs(object):
175 class abstractvfs(object):
176 """Abstract base class; cannot be instantiated"""
176 """Abstract base class; cannot be instantiated"""
177
177
178 def __init__(self, *args, **kwargs):
178 def __init__(self, *args, **kwargs):
179 '''Prevent instantiation; don't call this from subclasses.'''
179 '''Prevent instantiation; don't call this from subclasses.'''
180 raise NotImplementedError('attempted instantiating ' + str(type(self)))
180 raise NotImplementedError('attempted instantiating ' + str(type(self)))
181
181
182 def tryread(self, path):
182 def tryread(self, path):
183 '''gracefully return an empty string for missing files'''
183 '''gracefully return an empty string for missing files'''
184 try:
184 try:
185 return self.read(path)
185 return self.read(path)
186 except IOError, inst:
186 except IOError, inst:
187 if inst.errno != errno.ENOENT:
187 if inst.errno != errno.ENOENT:
188 raise
188 raise
189 return ""
189 return ""
190
190
191 def tryreadlines(self, path, mode='rb'):
191 def tryreadlines(self, path, mode='rb'):
192 '''gracefully return an empty array for missing files'''
192 '''gracefully return an empty array for missing files'''
193 try:
193 try:
194 return self.readlines(path, mode=mode)
194 return self.readlines(path, mode=mode)
195 except IOError, inst:
195 except IOError, inst:
196 if inst.errno != errno.ENOENT:
196 if inst.errno != errno.ENOENT:
197 raise
197 raise
198 return []
198 return []
199
199
200 def open(self, path, mode="r", text=False, atomictemp=False,
200 def open(self, path, mode="r", text=False, atomictemp=False,
201 notindexed=False):
201 notindexed=False):
202 '''Open ``path`` file, which is relative to vfs root.
202 '''Open ``path`` file, which is relative to vfs root.
203
203
204 Newly created directories are marked as "not to be indexed by
204 Newly created directories are marked as "not to be indexed by
205 the content indexing service", if ``notindexed`` is specified
205 the content indexing service", if ``notindexed`` is specified
206 for "write" mode access.
206 for "write" mode access.
207 '''
207 '''
208 self.open = self.__call__
208 self.open = self.__call__
209 return self.__call__(path, mode, text, atomictemp, notindexed)
209 return self.__call__(path, mode, text, atomictemp, notindexed)
210
210
211 def read(self, path):
211 def read(self, path):
212 fp = self(path, 'rb')
212 fp = self(path, 'rb')
213 try:
213 try:
214 return fp.read()
214 return fp.read()
215 finally:
215 finally:
216 fp.close()
216 fp.close()
217
217
218 def readlines(self, path, mode='rb'):
218 def readlines(self, path, mode='rb'):
219 fp = self(path, mode=mode)
219 fp = self(path, mode=mode)
220 try:
220 try:
221 return fp.readlines()
221 return fp.readlines()
222 finally:
222 finally:
223 fp.close()
223 fp.close()
224
224
225 def write(self, path, data):
225 def write(self, path, data):
226 fp = self(path, 'wb')
226 fp = self(path, 'wb')
227 try:
227 try:
228 return fp.write(data)
228 return fp.write(data)
229 finally:
229 finally:
230 fp.close()
230 fp.close()
231
231
232 def writelines(self, path, data, mode='wb', notindexed=False):
232 def writelines(self, path, data, mode='wb', notindexed=False):
233 fp = self(path, mode=mode, notindexed=notindexed)
233 fp = self(path, mode=mode, notindexed=notindexed)
234 try:
234 try:
235 return fp.writelines(data)
235 return fp.writelines(data)
236 finally:
236 finally:
237 fp.close()
237 fp.close()
238
238
239 def append(self, path, data):
239 def append(self, path, data):
240 fp = self(path, 'ab')
240 fp = self(path, 'ab')
241 try:
241 try:
242 return fp.write(data)
242 return fp.write(data)
243 finally:
243 finally:
244 fp.close()
244 fp.close()
245
245
246 def chmod(self, path, mode):
246 def chmod(self, path, mode):
247 return os.chmod(self.join(path), mode)
247 return os.chmod(self.join(path), mode)
248
248
249 def exists(self, path=None):
249 def exists(self, path=None):
250 return os.path.exists(self.join(path))
250 return os.path.exists(self.join(path))
251
251
252 def fstat(self, fp):
252 def fstat(self, fp):
253 return util.fstat(fp)
253 return util.fstat(fp)
254
254
255 def isdir(self, path=None):
255 def isdir(self, path=None):
256 return os.path.isdir(self.join(path))
256 return os.path.isdir(self.join(path))
257
257
258 def isfile(self, path=None):
258 def isfile(self, path=None):
259 return os.path.isfile(self.join(path))
259 return os.path.isfile(self.join(path))
260
260
261 def islink(self, path=None):
261 def islink(self, path=None):
262 return os.path.islink(self.join(path))
262 return os.path.islink(self.join(path))
263
263
264 def reljoin(self, *paths):
264 def reljoin(self, *paths):
265 """join various elements of a path together (as os.path.join would do)
265 """join various elements of a path together (as os.path.join would do)
266
266
267 The vfs base is not injected so that path stay relative. This exists
267 The vfs base is not injected so that path stay relative. This exists
268 to allow handling of strange encoding if needed."""
268 to allow handling of strange encoding if needed."""
269 return os.path.join(*paths)
269 return os.path.join(*paths)
270
270
271 def split(self, path):
271 def split(self, path):
272 """split top-most element of a path (as os.path.split would do)
272 """split top-most element of a path (as os.path.split would do)
273
273
274 This exists to allow handling of strange encoding if needed."""
274 This exists to allow handling of strange encoding if needed."""
275 return os.path.split(path)
275 return os.path.split(path)
276
276
277 def lexists(self, path=None):
277 def lexists(self, path=None):
278 return os.path.lexists(self.join(path))
278 return os.path.lexists(self.join(path))
279
279
280 def lstat(self, path=None):
280 def lstat(self, path=None):
281 return os.lstat(self.join(path))
281 return os.lstat(self.join(path))
282
282
283 def listdir(self, path=None):
283 def listdir(self, path=None):
284 return os.listdir(self.join(path))
284 return os.listdir(self.join(path))
285
285
286 def makedir(self, path=None, notindexed=True):
286 def makedir(self, path=None, notindexed=True):
287 return util.makedir(self.join(path), notindexed)
287 return util.makedir(self.join(path), notindexed)
288
288
289 def makedirs(self, path=None, mode=None):
289 def makedirs(self, path=None, mode=None):
290 return util.makedirs(self.join(path), mode)
290 return util.makedirs(self.join(path), mode)
291
291
292 def makelock(self, info, path):
292 def makelock(self, info, path):
293 return util.makelock(info, self.join(path))
293 return util.makelock(info, self.join(path))
294
294
295 def mkdir(self, path=None):
295 def mkdir(self, path=None):
296 return os.mkdir(self.join(path))
296 return os.mkdir(self.join(path))
297
297
298 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
298 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
299 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
299 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
300 dir=self.join(dir), text=text)
300 dir=self.join(dir), text=text)
301 dname, fname = util.split(name)
301 dname, fname = util.split(name)
302 if dir:
302 if dir:
303 return fd, os.path.join(dir, fname)
303 return fd, os.path.join(dir, fname)
304 else:
304 else:
305 return fd, fname
305 return fd, fname
306
306
307 def readdir(self, path=None, stat=None, skip=None):
307 def readdir(self, path=None, stat=None, skip=None):
308 return osutil.listdir(self.join(path), stat, skip)
308 return osutil.listdir(self.join(path), stat, skip)
309
309
310 def readlock(self, path):
310 def readlock(self, path):
311 return util.readlock(self.join(path))
311 return util.readlock(self.join(path))
312
312
313 def rename(self, src, dst):
313 def rename(self, src, dst):
314 return util.rename(self.join(src), self.join(dst))
314 return util.rename(self.join(src), self.join(dst))
315
315
316 def readlink(self, path):
316 def readlink(self, path):
317 return os.readlink(self.join(path))
317 return os.readlink(self.join(path))
318
318
319 def removedirs(self, path=None):
320 """Remove a leaf directory and all empty intermediate ones
321 """
322 return util.removedirs(self.join(path))
323
319 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
324 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
320 """Remove a directory tree recursively
325 """Remove a directory tree recursively
321
326
322 If ``forcibly``, this tries to remove READ-ONLY files, too.
327 If ``forcibly``, this tries to remove READ-ONLY files, too.
323 """
328 """
324 if forcibly:
329 if forcibly:
325 def onerror(function, path, excinfo):
330 def onerror(function, path, excinfo):
326 if function is not os.remove:
331 if function is not os.remove:
327 raise
332 raise
328 # read-only files cannot be unlinked under Windows
333 # read-only files cannot be unlinked under Windows
329 s = os.stat(path)
334 s = os.stat(path)
330 if (s.st_mode & stat.S_IWRITE) != 0:
335 if (s.st_mode & stat.S_IWRITE) != 0:
331 raise
336 raise
332 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
337 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
333 os.remove(path)
338 os.remove(path)
334 else:
339 else:
335 onerror = None
340 onerror = None
336 return shutil.rmtree(self.join(path),
341 return shutil.rmtree(self.join(path),
337 ignore_errors=ignore_errors, onerror=onerror)
342 ignore_errors=ignore_errors, onerror=onerror)
338
343
339 def setflags(self, path, l, x):
344 def setflags(self, path, l, x):
340 return util.setflags(self.join(path), l, x)
345 return util.setflags(self.join(path), l, x)
341
346
342 def stat(self, path=None):
347 def stat(self, path=None):
343 return os.stat(self.join(path))
348 return os.stat(self.join(path))
344
349
345 def unlink(self, path=None):
350 def unlink(self, path=None):
346 return util.unlink(self.join(path))
351 return util.unlink(self.join(path))
347
352
348 def unlinkpath(self, path=None, ignoremissing=False):
353 def unlinkpath(self, path=None, ignoremissing=False):
349 return util.unlinkpath(self.join(path), ignoremissing)
354 return util.unlinkpath(self.join(path), ignoremissing)
350
355
351 def utime(self, path=None, t=None):
356 def utime(self, path=None, t=None):
352 return os.utime(self.join(path), t)
357 return os.utime(self.join(path), t)
353
358
354 class vfs(abstractvfs):
359 class vfs(abstractvfs):
355 '''Operate files relative to a base directory
360 '''Operate files relative to a base directory
356
361
357 This class is used to hide the details of COW semantics and
362 This class is used to hide the details of COW semantics and
358 remote file access from higher level code.
363 remote file access from higher level code.
359 '''
364 '''
360 def __init__(self, base, audit=True, expandpath=False, realpath=False):
365 def __init__(self, base, audit=True, expandpath=False, realpath=False):
361 if expandpath:
366 if expandpath:
362 base = util.expandpath(base)
367 base = util.expandpath(base)
363 if realpath:
368 if realpath:
364 base = os.path.realpath(base)
369 base = os.path.realpath(base)
365 self.base = base
370 self.base = base
366 self._setmustaudit(audit)
371 self._setmustaudit(audit)
367 self.createmode = None
372 self.createmode = None
368 self._trustnlink = None
373 self._trustnlink = None
369
374
370 def _getmustaudit(self):
375 def _getmustaudit(self):
371 return self._audit
376 return self._audit
372
377
373 def _setmustaudit(self, onoff):
378 def _setmustaudit(self, onoff):
374 self._audit = onoff
379 self._audit = onoff
375 if onoff:
380 if onoff:
376 self.audit = pathutil.pathauditor(self.base)
381 self.audit = pathutil.pathauditor(self.base)
377 else:
382 else:
378 self.audit = util.always
383 self.audit = util.always
379
384
380 mustaudit = property(_getmustaudit, _setmustaudit)
385 mustaudit = property(_getmustaudit, _setmustaudit)
381
386
382 @util.propertycache
387 @util.propertycache
383 def _cansymlink(self):
388 def _cansymlink(self):
384 return util.checklink(self.base)
389 return util.checklink(self.base)
385
390
386 @util.propertycache
391 @util.propertycache
387 def _chmod(self):
392 def _chmod(self):
388 return util.checkexec(self.base)
393 return util.checkexec(self.base)
389
394
390 def _fixfilemode(self, name):
395 def _fixfilemode(self, name):
391 if self.createmode is None or not self._chmod:
396 if self.createmode is None or not self._chmod:
392 return
397 return
393 os.chmod(name, self.createmode & 0666)
398 os.chmod(name, self.createmode & 0666)
394
399
395 def __call__(self, path, mode="r", text=False, atomictemp=False,
400 def __call__(self, path, mode="r", text=False, atomictemp=False,
396 notindexed=False):
401 notindexed=False):
397 '''Open ``path`` file, which is relative to vfs root.
402 '''Open ``path`` file, which is relative to vfs root.
398
403
399 Newly created directories are marked as "not to be indexed by
404 Newly created directories are marked as "not to be indexed by
400 the content indexing service", if ``notindexed`` is specified
405 the content indexing service", if ``notindexed`` is specified
401 for "write" mode access.
406 for "write" mode access.
402 '''
407 '''
403 if self._audit:
408 if self._audit:
404 r = util.checkosfilename(path)
409 r = util.checkosfilename(path)
405 if r:
410 if r:
406 raise util.Abort("%s: %r" % (r, path))
411 raise util.Abort("%s: %r" % (r, path))
407 self.audit(path)
412 self.audit(path)
408 f = self.join(path)
413 f = self.join(path)
409
414
410 if not text and "b" not in mode:
415 if not text and "b" not in mode:
411 mode += "b" # for that other OS
416 mode += "b" # for that other OS
412
417
413 nlink = -1
418 nlink = -1
414 if mode not in ('r', 'rb'):
419 if mode not in ('r', 'rb'):
415 dirname, basename = util.split(f)
420 dirname, basename = util.split(f)
416 # If basename is empty, then the path is malformed because it points
421 # If basename is empty, then the path is malformed because it points
417 # to a directory. Let the posixfile() call below raise IOError.
422 # to a directory. Let the posixfile() call below raise IOError.
418 if basename:
423 if basename:
419 if atomictemp:
424 if atomictemp:
420 util.ensuredirs(dirname, self.createmode, notindexed)
425 util.ensuredirs(dirname, self.createmode, notindexed)
421 return util.atomictempfile(f, mode, self.createmode)
426 return util.atomictempfile(f, mode, self.createmode)
422 try:
427 try:
423 if 'w' in mode:
428 if 'w' in mode:
424 util.unlink(f)
429 util.unlink(f)
425 nlink = 0
430 nlink = 0
426 else:
431 else:
427 # nlinks() may behave differently for files on Windows
432 # nlinks() may behave differently for files on Windows
428 # shares if the file is open.
433 # shares if the file is open.
429 fd = util.posixfile(f)
434 fd = util.posixfile(f)
430 nlink = util.nlinks(f)
435 nlink = util.nlinks(f)
431 if nlink < 1:
436 if nlink < 1:
432 nlink = 2 # force mktempcopy (issue1922)
437 nlink = 2 # force mktempcopy (issue1922)
433 fd.close()
438 fd.close()
434 except (OSError, IOError), e:
439 except (OSError, IOError), e:
435 if e.errno != errno.ENOENT:
440 if e.errno != errno.ENOENT:
436 raise
441 raise
437 nlink = 0
442 nlink = 0
438 util.ensuredirs(dirname, self.createmode, notindexed)
443 util.ensuredirs(dirname, self.createmode, notindexed)
439 if nlink > 0:
444 if nlink > 0:
440 if self._trustnlink is None:
445 if self._trustnlink is None:
441 self._trustnlink = nlink > 1 or util.checknlink(f)
446 self._trustnlink = nlink > 1 or util.checknlink(f)
442 if nlink > 1 or not self._trustnlink:
447 if nlink > 1 or not self._trustnlink:
443 util.rename(util.mktempcopy(f), f)
448 util.rename(util.mktempcopy(f), f)
444 fp = util.posixfile(f, mode)
449 fp = util.posixfile(f, mode)
445 if nlink == 0:
450 if nlink == 0:
446 self._fixfilemode(f)
451 self._fixfilemode(f)
447 return fp
452 return fp
448
453
449 def symlink(self, src, dst):
454 def symlink(self, src, dst):
450 self.audit(dst)
455 self.audit(dst)
451 linkname = self.join(dst)
456 linkname = self.join(dst)
452 try:
457 try:
453 os.unlink(linkname)
458 os.unlink(linkname)
454 except OSError:
459 except OSError:
455 pass
460 pass
456
461
457 util.ensuredirs(os.path.dirname(linkname), self.createmode)
462 util.ensuredirs(os.path.dirname(linkname), self.createmode)
458
463
459 if self._cansymlink:
464 if self._cansymlink:
460 try:
465 try:
461 os.symlink(src, linkname)
466 os.symlink(src, linkname)
462 except OSError, err:
467 except OSError, err:
463 raise OSError(err.errno, _('could not symlink to %r: %s') %
468 raise OSError(err.errno, _('could not symlink to %r: %s') %
464 (src, err.strerror), linkname)
469 (src, err.strerror), linkname)
465 else:
470 else:
466 self.write(dst, src)
471 self.write(dst, src)
467
472
468 def join(self, path, *insidef):
473 def join(self, path, *insidef):
469 if path:
474 if path:
470 return os.path.join(self.base, path, *insidef)
475 return os.path.join(self.base, path, *insidef)
471 else:
476 else:
472 return self.base
477 return self.base
473
478
474 opener = vfs
479 opener = vfs
475
480
class auditvfs(object):
    '''Mixin delegating the mustaudit flag to a wrapped vfs.'''

    def __init__(self, vfs):
        self.vfs = vfs

    def _getmustaudit(self):
        return self.vfs.mustaudit

    def _setmustaudit(self, enabled):
        self.vfs.mustaudit = enabled

    mustaudit = property(_getmustaudit, _setmustaudit)
487
492
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if not path:
            return self.vfs.join(path)
        # filter the relative path as a whole before joining to the base
        return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))

filteropener = filtervfs
505
510
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # reject any mode that could modify the underlying vfs
        if mode not in ('r', 'rb'):
            raise util.Abort('this vfs is read only')
        return self.vfs(path, mode, *args, **kw)
516
521
517
522
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only errors on the root path itself are fatal
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            '''record dirname's stat; return False if already seen'''
            dirstat = os.stat(dirname)
            if any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst):
                return False
            dirlst.append(dirstat)
            return True
    else:
        # without samestat we cannot detect symlink cycles safely
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
569 dirs[:] = newdirs
565
570
def osrcpath():
    '''return default os-specific hgrc search path'''
    path = []
    # configuration snippets bundled with the installation (default.d/*.rc)
    defaultpath = os.path.join(util.datapath, 'default.d')
    if os.path.isdir(defaultpath):
        for f, kind in osutil.listdir(defaultpath):
            if f.endswith('.rc'):
                path.append(os.path.join(defaultpath, f))
    # system-wide config files, then per-user ones; later entries
    # override earlier ones when the list is read in order
    path.extend(systemrcpath())
    path.extend(userrcpath())
    path = [os.path.normpath(f) for f in path]
    return path
578
583
_rcpath = None # lazily-computed cache for rcpath()

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                # empty path components are skipped, which is how an
                # empty HGRCPATH ends up yielding an empty search path
                if not p:
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath
604
609
def intrev(repo, rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation.

    The working directory is represented by None; it maps to the
    virtual revision number len(repo).  Any other value is assumed to
    already be an integer revision and is returned unchanged.
    """
    if rev is not None:
        return rev
    return len(repo)
611
616
def revsingle(repo, revspec, default='.'):
    '''Return the single changectx named by revspec.

    An empty revspec (but not the integer 0, which is a valid revision
    number) falls back to ``default``.  If the spec resolves to several
    revisions, the last one wins; an empty result aborts.
    '''
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec])
    if not l:
        raise util.Abort(_('empty revision set'))
    return repo[l.last()]
620
625
def revpair(repo, revs):
    '''Resolve a list of revision specs to a (node, node-or-None) pair.

    A None second element means "compare against the working
    directory".
    '''
    if not revs:
        # no revisions given: first parent of the working directory
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    # pick the endpoints cheaply when the smartset's ordering is known
    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise util.Abort(_('empty revision range'))

    # a single spec without a range separator that resolved to one
    # revision means the caller wants a diff against the working dir
    if first == second and len(revs) == 1 and _revrangesep not in revs[0]:
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
646
651
_revrangesep = ':'

def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""

    def revfix(repo, val, defval):
        # an empty spec component (but not 0) maps to defval; anything
        # else is resolved to its revision number
        if not val and val != 0 and defval is not None:
            return defval
        return repo[val].rev()

    # 'l' accumulates the result; 'seen' mirrors its membership for
    # O(1) duplicate tests, synced lazily (see below)
    seen, l = set(), revset.baseset([])

    revsetaliases = [alias for (alias, _) in
                     repo.ui.configitems("revsetalias")]

    for spec in revs:
        if l and not seen:
            # a previous iteration deferred syncing 'seen'; do it now
            seen = set(l)
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            # ... except for revset aliases without arguments. These
            # should be parsed as soon as possible, because they might
            # clash with a hash prefix.
            if spec in revsetaliases:
                raise error.RepoLookupError

            if isinstance(spec, int):
                seen.add(spec)
                l = l + revset.baseset([spec])
                continue

            if _revrangesep in spec:
                start, end = spec.split(_revrangesep, 1)
                if start in revsetaliases or end in revsetaliases:
                    raise error.RepoLookupError

                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                if end == nullrev and start < 0:
                    start = nullrev
                rangeiter = repo.changelog.revs(start, end)
                if not seen and not l:
                    # by far the most common case: revs = ["-1:0"]
                    l = revset.baseset(rangeiter)
                    # defer syncing seen until next iteration
                    continue
                newrevs = set(rangeiter)
                if seen:
                    newrevs.difference_update(seen)
                    seen.update(newrevs)
                else:
                    seen = newrevs
                l = l + revset.baseset(sorted(newrevs, reverse=start > end))
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                if rev in seen:
                    continue
                seen.add(rev)
                l = l + revset.baseset([rev])
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(repo.ui, spec, repo)
        if seen or l:
            dl = [r for r in m(repo) if r not in seen]
            l = l + revset.baseset(dl)
            seen.update(dl)
        else:
            l = m(repo)

    return l
722
727
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            # bare pattern: let glob expand it; a malformed pattern is
            # kept as-is
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        # patterns with an explicit kind, and globs that matched
        # nothing, are passed through untouched
        ret.append(kindpat)
    return ret
741
746
def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches.'''
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default)
    def badfn(f, msg):
        # warn instead of aborting when a pattern names a bad file
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
    m.bad = badfn
    if m.always():
        # an always-matcher means the patterns were effectively unused
        pats = []
    return m, pats
758
763
def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''Return a matcher that will warn about bad matches.'''
    # same as matchandpats(), with the used patterns discarded
    m, _used = matchandpats(ctx, pats, opts, globbed, default)
    return m
762
767
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    # delegate to match.always, which short-circuits all pattern logic
    return matchmod.always(repo.root, repo.getcwd())
766
771
def matchfiles(repo, files):
    '''Return a matcher that will efficiently match exactly these files.'''
    # exact matchers do no pattern compilation at all
    return matchmod.exact(repo.root, repo.getcwd(), files)
770
775
def addremove(repo, matcher, prefix, opts={}, dry_run=None, similarity=None):
    '''Add new files, forget missing ones and record renames, recursing
    into matching subrepos.

    Returns 1 if any explicitly requested file was rejected or a
    subrepo reported failure, 0 otherwise.
    '''
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        # does the matcher mention subpath itself, or anything inside it?
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    # temporarily swap in a bad-file callback that records rejected
    # files in addition to delegating to the original one
    rejected = []
    origbad = m.bad
    def badfn(f, msg):
        if f in m.files():
            origbad(f, msg)
        rejected.append(f)

    m.bad = badfn
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
    m.bad = origbad

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        # exactly-named files are reported only in verbose mode
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
833
838
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    m = matchfiles(repo, files)
    # collect files the walk rejects instead of warning about them
    rejected = []
    m.bad = lambda x, y: rejected.append(x)

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # 1 if any explicitly listed file was rejected, 0 otherwise
    for f in rejected:
        if f in m.files():
            return 1
    return 0
863
868
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns (added, unknown, deleted, removed, forgotten) lists of
    repo-relative paths.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        # dstate is the one-letter dirstate entry ('?' untracked,
        # 'r' removed, 'a' added, ...); st is the stat result, false-y
        # when the file is gone from disk
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
892
897
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.

    Returns a dictionary mapping new file name to old file name.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo, added, removed,
                                                   similarity):
            # exactly-named pairs are reported only in verbose mode
            if (repo.ui.verbose or not matcher.exact(old)
                or not matcher.exact(new)):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (matcher.rel(old), matcher.rel(new),
                                score * 100))
            renames[new] = old
    return renames
907
912
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    # all dirstate mutations happen under the working-copy lock
    wlock = repo.wlock()
    try:
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)
    finally:
        wlock.release()
920
925
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow an existing copy record so chained copies point at the
    # original source
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # uncommitted source: copy data cannot be recorded yet
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
939
944
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.

    Returns the set of requirements; raises error.RequirementError when
    the file is corrupt or lists unsupported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r not in supported:
            # entries must start with an alphanumeric character; anything
            # else indicates file corruption rather than a new feature
            if not r or not r[0].isalnum():
                raise error.RequirementError(_(".hg/requires file is corrupt"))
            missings.append(r)
    missings.sort()
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement"
                   " for more information"))
    return requirements
958
963
class filecachesubentry(object):
    '''Stat-based change tracker for a single file path, used by
    filecacheentry/filecache to decide when a cached object is stale.'''
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        # tri-state: True/False once known, None until first stat
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # record the file's current stat info as the new baseline
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        '''True if the file appears to have changed since the last
        refresh (always True when the file is not cacheable).'''
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # returns a util.cachestat, or None when the file is missing
        try:
            return util.cachestat(path)
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
1013
1018
class filecacheentry(object):
    '''Aggregates filecachesubentry trackers for a set of paths.'''
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        # re-baseline every tracked path
        for entry in self._entries:
            entry.refresh()
1030
1035
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behaviour as
    propertycache).

    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and expose
        # this descriptor under the same attribute name
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
General Comments 0
You need to be logged in to leave comments. Login now