##// END OF EJS Templates
scmutil: use the optional badfn argument when building a matcher
Matt Harbison -
r25466:007a1d53 default
parent child Browse files
Show More
@@ -1,1161 +1,1162
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from mercurial.node import nullrev
9 from mercurial.node import nullrev
10 import util, error, osutil, revset, similar, encoding, phases
10 import util, error, osutil, revset, similar, encoding, phases
11 import pathutil
11 import pathutil
12 import match as matchmod
12 import match as matchmod
13 import os, errno, re, glob, tempfile, shutil, stat, inspect
13 import os, errno, re, glob, tempfile, shutil, stat, inspect
14
14
15 if os.name == 'nt':
15 if os.name == 'nt':
16 import scmwindows as scmplatform
16 import scmwindows as scmplatform
17 else:
17 else:
18 import scmposix as scmplatform
18 import scmposix as scmplatform
19
19
20 systemrcpath = scmplatform.systemrcpath
20 systemrcpath = scmplatform.systemrcpath
21 userrcpath = scmplatform.userrcpath
21 userrcpath = scmplatform.userrcpath
22
22
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
                                   ignored, clean))

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self):
        # __repr__ takes no extra arguments; the previous *args/**kwargs
        # parameters were spurious (repr() never passes any) and are dropped.
        return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                 'unknown=%r, ignored=%r, clean=%r>') % self)
75
75
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    # Subpaths present only in ctx2 are collected here so they can be
    # yielded as null subrepos after the regular ones below.
    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    # Yield deterministically (sorted by subpath) for stable output.
    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
100
100
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    if excluded:
        for n in excluded:
            if n not in repo:
                # discovery should not have included the filtered revision,
                # we have to explicitly exclude it until discovery is cleanup.
                continue
            ctx = repo[n]
            # Count secret (and higher-phase) changesets that are still alive;
            # extinct ones are not worth mentioning to the user.
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))
121
121
def checknewlabel(repo, lbl, kind):
    """Abort if ``lbl`` is not usable as a new label (bookmark/branch/tag)."""
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise util.Abort(_("the name '%s' is reserved") % lbl)
    for forbidden in (':', '\0', '\n', '\r'):
        if forbidden in lbl:
            raise util.Abort(_("%r cannot be used in a name") % forbidden)
    try:
        int(lbl)
    except ValueError:
        # Not an integer: acceptable.
        return
    raise util.Abort(_("cannot use an integer as a name"))
135
135
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if any(c in f for c in '\r\n'):
        raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
140
140
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        # User asked for no portability checking at all.
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise util.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
152
152
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    raw = ui.config('ui', 'portablefilenames', 'warn')
    lowered = raw.lower()
    asbool = util.parsebool(raw)
    # Windows cannot create non-portable names at all, so always abort there.
    abort = os.name == 'nt' or lowered == 'abort'
    warn = asbool or lowered == 'warn'
    if asbool is None and not (warn or abort or lowered == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % raw)
    return abort, warn
165
165
class casecollisionauditor(object):
    """Warn or abort when a new filename case-folds onto a tracked one."""

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # Case-fold every tracked filename in one pass; joining on NUL and
        # folding once is cheaper than folding each name separately.
        joined = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(joined).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            # Already audited this exact name; nothing more to do.
            return
        folded = encoding.lower(f)
        if folded in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise util.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(folded)
        self._newfiles.add(f)
189
189
def develwarn(tui, msg):
    """issue a developer warning message"""
    msg = 'devel-warn: ' + msg
    if tui.tracebackflag:
        # With --traceback, show the full stack leading to the warning.
        util.debugstacktrace(msg, 2)
    else:
        curframe = inspect.currentframe()
        calframe = inspect.getouterframes(curframe, 2)
        # calframe[2] is two frames up (the caller of develwarn's caller);
        # [1:4] extracts its (filename, lineno, function) for the message.
        tui.write_err('%s at: %s:%s (%s)\n' % ((msg,) + calframe[2][1:4]))
199
199
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    filtered = repo.changelog.filteredrevs
    if not filtered:
        return None
    revs = sorted(r for r in filtered if r <= maxrev)
    if not revs:
        return None
    # Hash the sorted revision numbers so the digest is deterministic.
    s = util.sha1()
    for rev in revs:
        s.update('%s;' % rev)
    return s.digest()
223
223
class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""
    # Subclasses must provide __call__(path, mode, ...) -> file object and
    # join(path) -> absolute path; everything below is built on those two.

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
        return []

    def open(self, path, mode="r", text=False, atomictemp=False,
             notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        # Rebind 'open' to __call__ on first use so later calls skip
        # this shim entirely.
        self.open = self.__call__
        return self.__call__(path, mode, text, atomictemp, notindexed)

    def read(self, path):
        # Read the whole file as binary, always closing the handle.
        fp = self(path, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

    def readlines(self, path, mode='rb'):
        fp = self(path, mode=mode)
        try:
            return fp.readlines()
        finally:
            fp.close()

    def write(self, path, data):
        fp = self(path, 'wb')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def writelines(self, path, data, mode='wb', notindexed=False):
        fp = self(path, mode=mode, notindexed=notindexed)
        try:
            return fp.writelines(data)
        finally:
            fp.close()

    def append(self, path, data):
        fp = self(path, 'ab')
        try:
            return fp.write(data)
        finally:
            fp.close()

    # The following methods delegate to os/util equivalents on the joined
    # (vfs-root-relative) path. path=None means the vfs root itself.

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        # Return the name relative to the vfs root, not the absolute path
        # tempfile produced.
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst):
        return util.rename(self.join(src), self.join(dst))

    def readlink(self, path):
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                # Only retry failures coming from os.remove; anything else
                # (e.g. rmdir failures) is re-raised as-is.
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directories under path

        ``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as path separator, even you specify POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)
423
423
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        # expandpath: expand ~user and environment variables in ``base``;
        # realpath: resolve symlinks to the canonical path.
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self._setmustaudit(audit)
        self.createmode = None
        self._trustnlink = None

    def _getmustaudit(self):
        return self._audit

    def _setmustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            self.audit = pathutil.pathauditor(self.base)
        else:
            # Auditing disabled: accept every path.
            self.audit = util.always

    mustaudit = property(_getmustaudit, _setmustaudit)

    @util.propertycache
    def _cansymlink(self):
        # Whether the filesystem under ``base`` supports symlinks.
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        # Whether the filesystem under ``base`` honors the exec bit.
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        # Apply createmode to newly created files, when configured and
        # when the filesystem supports permissions at all.
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise util.Abort("%s: %r" % (r, path))
            self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        # nlink tracks the link count of the target file; it decides below
        # whether a copy-on-write break (mktempcopy) or mode fixup is needed.
        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.ensuredirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        fd = util.posixfile(f)
                        nlink = util.nlinks(f)
                        if nlink < 1:
                            nlink = 2 # force mktempcopy (issue1922)
                        fd.close()
                except (OSError, IOError), e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                util.ensuredirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # Break the hardlink before writing so other links
                        # to this file keep their old contents.
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        '''Create a symlink at ``dst`` pointing to ``src``; falls back to a
        regular file containing ``src`` when symlinks are unsupported.'''
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        util.ensuredirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            self.write(dst, src)

    def join(self, path, *insidef):
        # None/empty path refers to the vfs root itself.
        if path:
            return os.path.join(self.base, path, *insidef)
        else:
            return self.base
543
543
# historical alias: much existing code refers to the vfs class as 'opener'
opener = vfs
545
545
class auditvfs(object):
    """Mixin that wraps another vfs and forwards its 'mustaudit' flag."""

    def __init__(self, vfs):
        # keep a reference to the wrapped vfs; subclasses delegate to it
        self.vfs = vfs

    def _getmustaudit(self):
        return self.vfs.mustaudit

    def _setmustaudit(self, value):
        self.vfs.mustaudit = value

    # reading/writing mustaudit transparently hits the wrapped vfs
    mustaudit = property(_getmustaudit, _setmustaudit)
557
557
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        # callable applied to every path before it reaches the wrapped vfs
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        # rewrite the path, then delegate the open to the wrapped vfs
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if not path:
            return self.vfs.join(path)
        # join relative components first so the filter sees the full path
        return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
573
573
# historical alias kept for callers that predate the vfs naming
filteropener = filtervfs
575
575
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes may pass through to the wrapped vfs
        if mode in ('r', 'rb'):
            return self.vfs(path, mode, *args, **kw)
        raise util.Abort('this vfs is read only')
586
586
587
587
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only errors on the top-level path itself are fatal
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            """Record dirname's stat; return True if it was not seen yet."""
            dirstat = os.stat(dirname)
            for seenstat in dirlst:
                if samestat(dirstat, seenstat):
                    return False
            dirlst.append(dirstat)
            return True
    else:
        # without samestat we cannot detect symlink cycles safely
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # walk the link target ourselves; os.walk won't
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
635
635
def osrcpath():
    '''return default os-specific hgrc search path'''
    paths = []
    # bundled defaults (default.d/*.rc) come first, lowest precedence
    defaultpath = os.path.join(util.datapath, 'default.d')
    if os.path.isdir(defaultpath):
        for f, kind in osutil.listdir(defaultpath):
            if f.endswith('.rc'):
                paths.append(os.path.join(defaultpath, f))
    # then system-wide, then per-user configuration
    paths.extend(systemrcpath())
    paths.extend(userrcpath())
    return [os.path.normpath(f) for f in paths]
648
648
649 _rcpath = None
649 _rcpath = None
650
650
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is not None:
        # already computed once for this process
        return _rcpath
    if 'HGRCPATH' not in os.environ:
        _rcpath = osrcpath()
        return _rcpath
    _rcpath = []
    for p in os.environ['HGRCPATH'].split(os.pathsep):
        if not p:
            # empty entries are ignored (HGRCPATH="" disables all rcs)
            continue
        p = util.expandpath(p)
        if not os.path.isdir(p):
            _rcpath.append(p)
            continue
        # a directory entry contributes every *.rc file inside it
        for f, kind in osutil.listdir(p):
            if f.endswith('.rc'):
                _rcpath.append(os.path.join(p, f))
    return _rcpath
674
674
def intrev(repo, rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation"""
    # the working directory is represented by None; map it past all
    # real revision numbers so comparisons still work
    if rev is not None:
        return rev
    return len(repo)
681
681
def revsingle(repo, revspec, default='.'):
    """Resolve *revspec* to a single changectx, preferring the last match.

    Falls back to *default* when the spec is empty (but 0 is a valid rev).
    """
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec])
    if not matched:
        raise util.Abort(_('empty revision set'))
    return repo[matched.last()]
690
690
def revpair(repo, revs):
    """Resolve *revs* into a (first, second) pair of nodes.

    With no specs, returns (first working-dir parent, None).  second is
    None when a single, non-range spec resolved to exactly one revision.
    """
    if not revs:
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    if not l:
        first = second = None
    elif l.isascending():
        # min/max are cheap on ordered smartsets
        first, second = l.min(), l.max()
    elif l.isdescending():
        first, second = l.max(), l.min()
    else:
        first, second = l.first(), l.last()

    if first is None:
        raise util.Abort(_('empty revision range'))

    # a single non-range spec resolving to one rev reports no second rev
    if first == second and len(revs) == 1 and _revrangesep not in revs[0]:
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
716
716
717 _revrangesep = ':'
717 _revrangesep = ':'
718
718
def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""

    def revfix(repo, val, defval):
        # empty spec half (e.g. ":5") falls back to defval; 0 is a real rev
        if not val and val != 0 and defval is not None:
            return defval
        return repo[val].rev()

    subsets = []

    revsetaliases = [alias for (alias, _) in
                     repo.ui.configitems("revsetalias")]

    for spec in revs:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            # ... except for revset aliases without arguments. These
            # should be parsed as soon as possible, because they might
            # clash with a hash prefix.
            if spec in revsetaliases:
                raise error.RepoLookupError

            if isinstance(spec, int):
                subsets.append(revset.baseset([spec]))
                continue

            if _revrangesep in spec:
                start, end = spec.split(_revrangesep, 1)
                if start in revsetaliases or end in revsetaliases:
                    raise error.RepoLookupError

                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                if end == nullrev and start < 0:
                    start = nullrev
                if start < end:
                    subsets.append(revset.spanset(repo, start, end + 1))
                else:
                    # descending range: walk backwards past 'end'
                    subsets.append(revset.spanset(repo, start, end - 1))
                continue
            elif spec and spec in repo:  # single unquoted rev
                subsets.append(revset.baseset([revfix(repo, spec, None)]))
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(repo.ui, spec, repo)
        subsets.append(m(repo))

    return revset._combinesets(subsets)
773
773
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicitly-kinded patterns (glob:, re:, ...) pass through
            expanded.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            expanded.extend(globbed)
        else:
            # no match on disk: keep the literal pattern
            expanded.append(kindpat)
    return expanded
792
792
def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches.'''
    # normalize the no-pattern sentinel used by some callers
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def badfn(f, msg):
        # warn instead of aborting on files that cannot be matched;
        # closes over 'm' (bound below, called only after matching starts)
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    # pass badfn directly so the matcher is constructed with the warning
    # callback rather than monkey-patching m.bad afterwards
    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        # an always-matcher means no effective patterns remain
        pats = []
    return m, pats
809
810
def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''Return a matcher that will warn about bad matches.'''
    # same as matchandpats, discarding the expanded pattern list
    matcher, _unused = matchandpats(ctx, pats, opts, globbed, default)
    return matcher
813
814
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    # an 'always' matcher short-circuits pattern evaluation entirely
    return matchmod.always(repo.root, repo.getcwd())
817
818
def matchfiles(repo, files):
    '''Return a matcher that will efficiently match exactly these files.'''
    # exact matchers never walk the whole working directory
    return matchmod.exact(repo.root, repo.getcwd(), files)
821
822
def addremove(repo, matcher, prefix, opts={}, dry_run=None, similarity=None):
    """Schedule unknown files for addition and missing ones for removal.

    Recurses into matching subrepos, detects renames when *similarity* > 0,
    and returns 1 if any explicitly-requested file could not be processed,
    else 0.
    """
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        # a subrepo is in scope if named exactly or if any pattern file
        # lies underneath it
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # only explicitly-listed files are worth warning about
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        # stay quiet about files the user named explicitly (unless -v)
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                msg = _('adding %s\n') % m.uipath(abs)
            else:
                msg = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(msg)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
883
884
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    m = matchfiles(repo, files)
    rejected = []
    # silently collect files that could not be matched
    m.bad = lambda x, y: rejected.append(x)

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                msg = _('adding %s\n') % abs
            else:
                msg = _('removing %s\n') % abs
            repo.ui.status(msg)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # report failure if any requested file was rejected by the matcher
    for f in rejected:
        if f in m.files():
            return 1
    return 0
913
914
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        state = dirstate[abs]
        # branch order matters: check the dirstate flag against whether
        # the file actually exists on disk (st is its stat, or None)
        if state == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif state != 'r' and not st:
            deleted.append(abs)
        elif state == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif state == 'r' and not st:
            removed.append(abs)
        elif state == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
942
943
943 def _findrenames(repo, matcher, added, removed, similarity):
944 def _findrenames(repo, matcher, added, removed, similarity):
944 '''Find renames from removed files to added ones.'''
945 '''Find renames from removed files to added ones.'''
945 renames = {}
946 renames = {}
946 if similarity > 0:
947 if similarity > 0:
947 for old, new, score in similar.findrenames(repo, added, removed,
948 for old, new, score in similar.findrenames(repo, added, removed,
948 similarity):
949 similarity):
949 if (repo.ui.verbose or not matcher.exact(old)
950 if (repo.ui.verbose or not matcher.exact(old)
950 or not matcher.exact(new)):
951 or not matcher.exact(new)):
951 repo.ui.status(_('recording removal of %s as rename to %s '
952 repo.ui.status(_('recording removal of %s as rename to %s '
952 '(%d%% similar)\n') %
953 '(%d%% similar)\n') %
953 (matcher.rel(old), matcher.rel(new),
954 (matcher.rel(old), matcher.rel(new),
954 score * 100))
955 score * 100))
955 renames[new] = old
956 renames[new] = old
956 return renames
957 return renames
957
958
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    wlock = repo.wlock()
    try:
        # forget before add so a file in both lists ends up added
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)
    finally:
        wlock.release()
970
971
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:  # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
        return

    if repo.dirstate[origsrc] == 'a' and origsrc == src:
        # the source was only just added: there is no committed copy data
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if repo.dirstate[dst] in '?r' and not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(origsrc, dst)
989
990
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r in supported:
            continue
        # a non-alphanumeric first character means the file is garbage,
        # not merely a newer feature name
        if not r or not r[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(r)
    missings.sort()
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1008
1009
def writerequires(opener, requirements):
    """Write *requirements*, sorted one per line, to .hg/requires via
    *opener*."""
    reqfile = opener("requires", "w")
    for entry in sorted(requirements):
        reqfile.write("%s\n" % entry)
    reqfile.close()
1014
1015
1015 class filecachesubentry(object):
1016 class filecachesubentry(object):
1016 def __init__(self, path, stat):
1017 def __init__(self, path, stat):
1017 self.path = path
1018 self.path = path
1018 self.cachestat = None
1019 self.cachestat = None
1019 self._cacheable = None
1020 self._cacheable = None
1020
1021
1021 if stat:
1022 if stat:
1022 self.cachestat = filecachesubentry.stat(self.path)
1023 self.cachestat = filecachesubentry.stat(self.path)
1023
1024
1024 if self.cachestat:
1025 if self.cachestat:
1025 self._cacheable = self.cachestat.cacheable()
1026 self._cacheable = self.cachestat.cacheable()
1026 else:
1027 else:
1027 # None means we don't know yet
1028 # None means we don't know yet
1028 self._cacheable = None
1029 self._cacheable = None
1029
1030
1030 def refresh(self):
1031 def refresh(self):
1031 if self.cacheable():
1032 if self.cacheable():
1032 self.cachestat = filecachesubentry.stat(self.path)
1033 self.cachestat = filecachesubentry.stat(self.path)
1033
1034
1034 def cacheable(self):
1035 def cacheable(self):
1035 if self._cacheable is not None:
1036 if self._cacheable is not None:
1036 return self._cacheable
1037 return self._cacheable
1037
1038
1038 # we don't know yet, assume it is for now
1039 # we don't know yet, assume it is for now
1039 return True
1040 return True
1040
1041
1041 def changed(self):
1042 def changed(self):
1042 # no point in going further if we can't cache it
1043 # no point in going further if we can't cache it
1043 if not self.cacheable():
1044 if not self.cacheable():
1044 return True
1045 return True
1045
1046
1046 newstat = filecachesubentry.stat(self.path)
1047 newstat = filecachesubentry.stat(self.path)
1047
1048
1048 # we may not know if it's cacheable yet, check again now
1049 # we may not know if it's cacheable yet, check again now
1049 if newstat and self._cacheable is None:
1050 if newstat and self._cacheable is None:
1050 self._cacheable = newstat.cacheable()
1051 self._cacheable = newstat.cacheable()
1051
1052
1052 # check again
1053 # check again
1053 if not self._cacheable:
1054 if not self._cacheable:
1054 return True
1055 return True
1055
1056
1056 if self.cachestat != newstat:
1057 if self.cachestat != newstat:
1057 self.cachestat = newstat
1058 self.cachestat = newstat
1058 return True
1059 return True
1059 else:
1060 else:
1060 return False
1061 return False
1061
1062
1062 @staticmethod
1063 @staticmethod
1063 def stat(path):
1064 def stat(path):
1064 try:
1065 try:
1065 return util.cachestat(path)
1066 return util.cachestat(path)
1066 except OSError, e:
1067 except OSError, e:
1067 if e.errno != errno.ENOENT:
1068 if e.errno != errno.ENOENT:
1068 raise
1069 raise
1069
1070
class filecacheentry(object):
    """Aggregate of filecachesubentry trackers, one per watched path."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        '''true if any entry has changed'''
        # short-circuits at the first changed entry
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
1086
1087
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates
    the object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behaviour as
    propertycache).

    '''

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and its name
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # fast path: the value is already materialized on the instance
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        ce = obj._filecache.get(self.name)

        if ce is not None:
            if ce.changed():
                ce.obj = self.func(obj)
        else:
            joined = [self.join(obj, p) for p in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            ce = filecacheentry(joined, True)
            ce.obj = self.func(obj)

        obj._filecache[self.name] = ce

        obj.__dict__[self.name] = ce.obj
        return ce.obj

    def __set__(self, obj, value):
        ce = obj._filecache.get(self.name)
        if ce is None:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            joined = [self.join(obj, p) for p in self.paths]
            ce = filecacheentry(joined, False)
            obj._filecache[self.name] = ce

        ce.obj = value                   # update cached copy
        obj.__dict__[self.name] = value  # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
General Comments 0
You need to be logged in to leave comments. Login now