branch: avoid using reserved tag names...
Wagner Bruna
r10417:58e040c5 stable
@@ -1,228 +1,233 @@
1 # changelog.py - changelog class for mercurial
1 # changelog.py - changelog class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid
8 from node import bin, hex, nullid
9 from i18n import _
9 from i18n import _
10 import util, error, revlog, encoding
10 import util, error, revlog, encoding
11
11
12 def _string_escape(text):
12 def _string_escape(text):
13 """
13 """
14 >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
14 >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
15 >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
15 >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
16 >>> s
16 >>> s
17 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
17 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
18 >>> res = _string_escape(s)
18 >>> res = _string_escape(s)
19 >>> s == res.decode('string_escape')
19 >>> s == res.decode('string_escape')
20 True
20 True
21 """
21 """
22 # subset of the string_escape codec
22 # subset of the string_escape codec
23 text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
23 text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
24 return text.replace('\0', '\\0')
24 return text.replace('\0', '\\0')
25
25
26 def decodeextra(text):
26 def decodeextra(text):
27 extra = {}
27 extra = {}
28 for l in text.split('\0'):
28 for l in text.split('\0'):
29 if l:
29 if l:
30 k, v = l.decode('string_escape').split(':', 1)
30 k, v = l.decode('string_escape').split(':', 1)
31 extra[k] = v
31 extra[k] = v
32 return extra
32 return extra
33
33
34 def encodeextra(d):
34 def encodeextra(d):
35 # keys must be sorted to produce a deterministic changelog entry
35 # keys must be sorted to produce a deterministic changelog entry
36 items = [_string_escape('%s:%s' % (k, d[k])) for k in sorted(d)]
36 items = [_string_escape('%s:%s' % (k, d[k])) for k in sorted(d)]
37 return "\0".join(items)
37 return "\0".join(items)
38
38
39 class appender(object):
39 class appender(object):
40 '''the changelog index must be updated last on disk, so we use this class
40 '''the changelog index must be updated last on disk, so we use this class
41 to delay writes to it'''
41 to delay writes to it'''
42 def __init__(self, fp, buf):
42 def __init__(self, fp, buf):
43 self.data = buf
43 self.data = buf
44 self.fp = fp
44 self.fp = fp
45 self.offset = fp.tell()
45 self.offset = fp.tell()
46 self.size = util.fstat(fp).st_size
46 self.size = util.fstat(fp).st_size
47
47
48 def end(self):
48 def end(self):
49 return self.size + len("".join(self.data))
49 return self.size + len("".join(self.data))
50 def tell(self):
50 def tell(self):
51 return self.offset
51 return self.offset
52 def flush(self):
52 def flush(self):
53 pass
53 pass
54 def close(self):
54 def close(self):
55 self.fp.close()
55 self.fp.close()
56
56
57 def seek(self, offset, whence=0):
57 def seek(self, offset, whence=0):
58 '''virtual file offset spans real file and data'''
58 '''virtual file offset spans real file and data'''
59 if whence == 0:
59 if whence == 0:
60 self.offset = offset
60 self.offset = offset
61 elif whence == 1:
61 elif whence == 1:
62 self.offset += offset
62 self.offset += offset
63 elif whence == 2:
63 elif whence == 2:
64 self.offset = self.end() + offset
64 self.offset = self.end() + offset
65 if self.offset < self.size:
65 if self.offset < self.size:
66 self.fp.seek(self.offset)
66 self.fp.seek(self.offset)
67
67
68 def read(self, count=-1):
68 def read(self, count=-1):
69 '''only trick here is reads that span real file and data'''
69 '''only trick here is reads that span real file and data'''
70 ret = ""
70 ret = ""
71 if self.offset < self.size:
71 if self.offset < self.size:
72 s = self.fp.read(count)
72 s = self.fp.read(count)
73 ret = s
73 ret = s
74 self.offset += len(s)
74 self.offset += len(s)
75 if count > 0:
75 if count > 0:
76 count -= len(s)
76 count -= len(s)
77 if count != 0:
77 if count != 0:
78 doff = self.offset - self.size
78 doff = self.offset - self.size
79 self.data.insert(0, "".join(self.data))
79 self.data.insert(0, "".join(self.data))
80 del self.data[1:]
80 del self.data[1:]
81 s = self.data[0][doff:doff+count]
81 s = self.data[0][doff:doff+count]
82 self.offset += len(s)
82 self.offset += len(s)
83 ret += s
83 ret += s
84 return ret
84 return ret
85
85
86 def write(self, s):
86 def write(self, s):
87 self.data.append(str(s))
87 self.data.append(str(s))
88 self.offset += len(s)
88 self.offset += len(s)
89
89
90 def delayopener(opener, target, divert, buf):
90 def delayopener(opener, target, divert, buf):
91 def o(name, mode='r'):
91 def o(name, mode='r'):
92 if name != target:
92 if name != target:
93 return opener(name, mode)
93 return opener(name, mode)
94 if divert:
94 if divert:
95 return opener(name + ".a", mode.replace('a', 'w'))
95 return opener(name + ".a", mode.replace('a', 'w'))
96 # otherwise, divert to memory
96 # otherwise, divert to memory
97 return appender(opener(name, mode), buf)
97 return appender(opener(name, mode), buf)
98 return o
98 return o
99
99
100 class changelog(revlog.revlog):
100 class changelog(revlog.revlog):
101 def __init__(self, opener):
101 def __init__(self, opener):
102 revlog.revlog.__init__(self, opener, "00changelog.i")
102 revlog.revlog.__init__(self, opener, "00changelog.i")
103 self._realopener = opener
103 self._realopener = opener
104 self._delayed = False
104 self._delayed = False
105 self._divert = False
105 self._divert = False
106
106
107 def delayupdate(self):
107 def delayupdate(self):
108 "delay visibility of index updates to other readers"
108 "delay visibility of index updates to other readers"
109 self._delayed = True
109 self._delayed = True
110 self._divert = (len(self) == 0)
110 self._divert = (len(self) == 0)
111 self._delaybuf = []
111 self._delaybuf = []
112 self.opener = delayopener(self._realopener, self.indexfile,
112 self.opener = delayopener(self._realopener, self.indexfile,
113 self._divert, self._delaybuf)
113 self._divert, self._delaybuf)
114
114
115 def finalize(self, tr):
115 def finalize(self, tr):
116 "finalize index updates"
116 "finalize index updates"
117 self._delayed = False
117 self._delayed = False
118 self.opener = self._realopener
118 self.opener = self._realopener
119 # move redirected index data back into place
119 # move redirected index data back into place
120 if self._divert:
120 if self._divert:
121 n = self.opener(self.indexfile + ".a").name
121 n = self.opener(self.indexfile + ".a").name
122 util.rename(n, n[:-2])
122 util.rename(n, n[:-2])
123 elif self._delaybuf:
123 elif self._delaybuf:
124 fp = self.opener(self.indexfile, 'a')
124 fp = self.opener(self.indexfile, 'a')
125 fp.write("".join(self._delaybuf))
125 fp.write("".join(self._delaybuf))
126 fp.close()
126 fp.close()
127 self._delaybuf = []
127 self._delaybuf = []
128 # split when we're done
128 # split when we're done
129 self.checkinlinesize(tr)
129 self.checkinlinesize(tr)
130
130
131 def readpending(self, file):
131 def readpending(self, file):
132 r = revlog.revlog(self.opener, file)
132 r = revlog.revlog(self.opener, file)
133 self.index = r.index
133 self.index = r.index
134 self.nodemap = r.nodemap
134 self.nodemap = r.nodemap
135 self._chunkcache = r._chunkcache
135 self._chunkcache = r._chunkcache
136
136
137 def writepending(self):
137 def writepending(self):
138 "create a file containing the unfinalized state for pretxnchangegroup"
138 "create a file containing the unfinalized state for pretxnchangegroup"
139 if self._delaybuf:
139 if self._delaybuf:
140 # make a temporary copy of the index
140 # make a temporary copy of the index
141 fp1 = self._realopener(self.indexfile)
141 fp1 = self._realopener(self.indexfile)
142 fp2 = self._realopener(self.indexfile + ".a", "w")
142 fp2 = self._realopener(self.indexfile + ".a", "w")
143 fp2.write(fp1.read())
143 fp2.write(fp1.read())
144 # add pending data
144 # add pending data
145 fp2.write("".join(self._delaybuf))
145 fp2.write("".join(self._delaybuf))
146 fp2.close()
146 fp2.close()
147 # switch modes so finalize can simply rename
147 # switch modes so finalize can simply rename
148 self._delaybuf = []
148 self._delaybuf = []
149 self._divert = True
149 self._divert = True
150
150
151 if self._divert:
151 if self._divert:
152 return True
152 return True
153
153
154 return False
154 return False
155
155
156 def checkinlinesize(self, tr, fp=None):
156 def checkinlinesize(self, tr, fp=None):
157 if not self._delayed:
157 if not self._delayed:
158 revlog.revlog.checkinlinesize(self, tr, fp)
158 revlog.revlog.checkinlinesize(self, tr, fp)
159
159
160 def read(self, node):
160 def read(self, node):
161 """
161 """
162 format used:
162 format used:
163 nodeid\n : manifest node in ascii
163 nodeid\n : manifest node in ascii
164 user\n : user, no \n or \r allowed
164 user\n : user, no \n or \r allowed
165 time tz extra\n : date (time is int or float, timezone is int)
165 time tz extra\n : date (time is int or float, timezone is int)
166 : extra is metadatas, encoded and separated by '\0'
166 : extra is metadatas, encoded and separated by '\0'
167 : older versions ignore it
167 : older versions ignore it
168 files\n\n : files modified by the cset, no \n or \r allowed
168 files\n\n : files modified by the cset, no \n or \r allowed
169 (.*) : comment (free text, ideally utf-8)
169 (.*) : comment (free text, ideally utf-8)
170
170
171 changelog v0 doesn't use extra
171 changelog v0 doesn't use extra
172 """
172 """
173 text = self.revision(node)
173 text = self.revision(node)
174 if not text:
174 if not text:
175 return (nullid, "", (0, 0), [], "", {'branch': 'default'})
175 return (nullid, "", (0, 0), [], "", {'branch': 'default'})
176 last = text.index("\n\n")
176 last = text.index("\n\n")
177 desc = encoding.tolocal(text[last + 2:])
177 desc = encoding.tolocal(text[last + 2:])
178 l = text[:last].split('\n')
178 l = text[:last].split('\n')
179 manifest = bin(l[0])
179 manifest = bin(l[0])
180 user = encoding.tolocal(l[1])
180 user = encoding.tolocal(l[1])
181
181
182 extra_data = l[2].split(' ', 2)
182 extra_data = l[2].split(' ', 2)
183 if len(extra_data) != 3:
183 if len(extra_data) != 3:
184 time = float(extra_data.pop(0))
184 time = float(extra_data.pop(0))
185 try:
185 try:
186 # various tools did silly things with the time zone field.
186 # various tools did silly things with the time zone field.
187 timezone = int(extra_data[0])
187 timezone = int(extra_data[0])
188 except:
188 except:
189 timezone = 0
189 timezone = 0
190 extra = {}
190 extra = {}
191 else:
191 else:
192 time, timezone, extra = extra_data
192 time, timezone, extra = extra_data
193 time, timezone = float(time), int(timezone)
193 time, timezone = float(time), int(timezone)
194 extra = decodeextra(extra)
194 extra = decodeextra(extra)
195 if not extra.get('branch'):
195 if not extra.get('branch'):
196 extra['branch'] = 'default'
196 extra['branch'] = 'default'
197 files = l[3:]
197 files = l[3:]
198 return (manifest, user, (time, timezone), files, desc, extra)
198 return (manifest, user, (time, timezone), files, desc, extra)
199
199
200 def add(self, manifest, files, desc, transaction, p1, p2,
200 def add(self, manifest, files, desc, transaction, p1, p2,
201 user, date=None, extra=None):
201 user, date=None, extra=None):
202 user = user.strip()
202 user = user.strip()
203 # An empty username or a username with a "\n" will make the
203 # An empty username or a username with a "\n" will make the
204 # revision text contain two "\n\n" sequences -> corrupt
204 # revision text contain two "\n\n" sequences -> corrupt
205 # repository since read cannot unpack the revision.
205 # repository since read cannot unpack the revision.
206 if not user:
206 if not user:
207 raise error.RevlogError(_("empty username"))
207 raise error.RevlogError(_("empty username"))
208 if "\n" in user:
208 if "\n" in user:
209 raise error.RevlogError(_("username %s contains a newline")
209 raise error.RevlogError(_("username %s contains a newline")
210 % repr(user))
210 % repr(user))
211
211
212 # strip trailing whitespace and leading and trailing empty lines
212 # strip trailing whitespace and leading and trailing empty lines
213 desc = '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')
213 desc = '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')
214
214
215 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
215 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
216
216
217 if date:
217 if date:
218 parseddate = "%d %d" % util.parsedate(date)
218 parseddate = "%d %d" % util.parsedate(date)
219 else:
219 else:
220 parseddate = "%d %d" % util.makedate()
220 parseddate = "%d %d" % util.makedate()
221 if extra and extra.get("branch") in ("default", ""):
221 if extra:
222 branch = extra.get("branch")
223 if branch in ("default", ""):
222 del extra["branch"]
224 del extra["branch"]
225 elif branch in (".", "null", "tip"):
226 raise error.RevlogError(_('the name \'%s\' is reserved')
227 % branch)
223 if extra:
228 if extra:
224 extra = encodeextra(extra)
229 extra = encodeextra(extra)
225 parseddate = "%s %s" % (parseddate, extra)
230 parseddate = "%s %s" % (parseddate, extra)
226 l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
231 l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
227 text = "\n".join(l)
232 text = "\n".join(l)
228 return self.addrevision(text, transaction, len(self), p1, p2)
233 return self.addrevision(text, transaction, len(self), p1, p2)
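
The hunk above is the commit-side half of the change: changelog.add() now inspects the 'branch' key of the extra dictionary before encoding it. An empty or 'default' branch is still dropped, and the reserved names '.', 'null' and 'tip' are now rejected with a RevlogError. A minimal standalone sketch of that logic follows (illustrative only, not Mercurial's API; ValueError stands in for error.RevlogError):

    def checkbranch(extra):
        # mirrors the branch handling added to changelog.add() above
        if not extra:
            return extra
        branch = extra.get("branch")
        if branch in ("default", ""):
            # 'default' is implicit and is never stored in the changelog entry
            del extra["branch"]
        elif branch in (".", "null", "tip"):
            # changelog.add() raises error.RevlogError(_("the name '%s' is reserved") % branch)
            raise ValueError("the name '%s' is reserved" % branch)
        return extra

    print(checkbranch({"branch": "default"}))   # {}  -- dropped
    print(checkbranch({"branch": "stable"}))    # {'branch': 'stable'} -- kept
    # checkbranch({"branch": "tip"})            # raises: the name 'tip' is reserved
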
@@ -1,641 +1,643 @@
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid
8 from node import nullid
9 from i18n import _
9 from i18n import _
10 import util, ignore, osutil, parsers
10 import util, ignore, osutil, parsers
11 import struct, os, stat, errno
11 import struct, os, stat, errno
12 import cStringIO
12 import cStringIO
13
13
14 _unknown = ('?', 0, 0, 0)
14 _unknown = ('?', 0, 0, 0)
15 _format = ">cllll"
15 _format = ">cllll"
16 propertycache = util.propertycache
16 propertycache = util.propertycache
17
17
18 def _finddirs(path):
18 def _finddirs(path):
19 pos = path.rfind('/')
19 pos = path.rfind('/')
20 while pos != -1:
20 while pos != -1:
21 yield path[:pos]
21 yield path[:pos]
22 pos = path.rfind('/', 0, pos)
22 pos = path.rfind('/', 0, pos)
23
23
24 def _incdirs(dirs, path):
24 def _incdirs(dirs, path):
25 for base in _finddirs(path):
25 for base in _finddirs(path):
26 if base in dirs:
26 if base in dirs:
27 dirs[base] += 1
27 dirs[base] += 1
28 return
28 return
29 dirs[base] = 1
29 dirs[base] = 1
30
30
31 def _decdirs(dirs, path):
31 def _decdirs(dirs, path):
32 for base in _finddirs(path):
32 for base in _finddirs(path):
33 if dirs[base] > 1:
33 if dirs[base] > 1:
34 dirs[base] -= 1
34 dirs[base] -= 1
35 return
35 return
36 del dirs[base]
36 del dirs[base]
37
37
38 class dirstate(object):
38 class dirstate(object):
39
39
40 def __init__(self, opener, ui, root):
40 def __init__(self, opener, ui, root):
41 '''Create a new dirstate object. opener is an open()-like callable
41 '''Create a new dirstate object. opener is an open()-like callable
42 that can be used to open the dirstate file; root is the root of the
42 that can be used to open the dirstate file; root is the root of the
43 directory tracked by the dirstate.'''
43 directory tracked by the dirstate.'''
44 self._opener = opener
44 self._opener = opener
45 self._root = root
45 self._root = root
46 self._rootdir = os.path.join(root, '')
46 self._rootdir = os.path.join(root, '')
47 self._dirty = False
47 self._dirty = False
48 self._dirtypl = False
48 self._dirtypl = False
49 self._ui = ui
49 self._ui = ui
50
50
51 @propertycache
51 @propertycache
52 def _map(self):
52 def _map(self):
53 '''Return the dirstate contents as a map from filename to
53 '''Return the dirstate contents as a map from filename to
54 (state, mode, size, time).'''
54 (state, mode, size, time).'''
55 self._read()
55 self._read()
56 return self._map
56 return self._map
57
57
58 @propertycache
58 @propertycache
59 def _copymap(self):
59 def _copymap(self):
60 self._read()
60 self._read()
61 return self._copymap
61 return self._copymap
62
62
63 @propertycache
63 @propertycache
64 def _foldmap(self):
64 def _foldmap(self):
65 f = {}
65 f = {}
66 for name in self._map:
66 for name in self._map:
67 f[os.path.normcase(name)] = name
67 f[os.path.normcase(name)] = name
68 return f
68 return f
69
69
70 @propertycache
70 @propertycache
71 def _branch(self):
71 def _branch(self):
72 try:
72 try:
73 return self._opener("branch").read().strip() or "default"
73 return self._opener("branch").read().strip() or "default"
74 except IOError:
74 except IOError:
75 return "default"
75 return "default"
76
76
77 @propertycache
77 @propertycache
78 def _pl(self):
78 def _pl(self):
79 try:
79 try:
80 st = self._opener("dirstate").read(40)
80 st = self._opener("dirstate").read(40)
81 l = len(st)
81 l = len(st)
82 if l == 40:
82 if l == 40:
83 return st[:20], st[20:40]
83 return st[:20], st[20:40]
84 elif l > 0 and l < 40:
84 elif l > 0 and l < 40:
85 raise util.Abort(_('working directory state appears damaged!'))
85 raise util.Abort(_('working directory state appears damaged!'))
86 except IOError, err:
86 except IOError, err:
87 if err.errno != errno.ENOENT: raise
87 if err.errno != errno.ENOENT: raise
88 return [nullid, nullid]
88 return [nullid, nullid]
89
89
90 @propertycache
90 @propertycache
91 def _dirs(self):
91 def _dirs(self):
92 dirs = {}
92 dirs = {}
93 for f,s in self._map.iteritems():
93 for f,s in self._map.iteritems():
94 if s[0] != 'r':
94 if s[0] != 'r':
95 _incdirs(dirs, f)
95 _incdirs(dirs, f)
96 return dirs
96 return dirs
97
97
98 @propertycache
98 @propertycache
99 def _ignore(self):
99 def _ignore(self):
100 files = [self._join('.hgignore')]
100 files = [self._join('.hgignore')]
101 for name, path in self._ui.configitems("ui"):
101 for name, path in self._ui.configitems("ui"):
102 if name == 'ignore' or name.startswith('ignore.'):
102 if name == 'ignore' or name.startswith('ignore.'):
103 files.append(util.expandpath(path))
103 files.append(util.expandpath(path))
104 return ignore.ignore(self._root, files, self._ui.warn)
104 return ignore.ignore(self._root, files, self._ui.warn)
105
105
106 @propertycache
106 @propertycache
107 def _slash(self):
107 def _slash(self):
108 return self._ui.configbool('ui', 'slash') and os.sep != '/'
108 return self._ui.configbool('ui', 'slash') and os.sep != '/'
109
109
110 @propertycache
110 @propertycache
111 def _checklink(self):
111 def _checklink(self):
112 return util.checklink(self._root)
112 return util.checklink(self._root)
113
113
114 @propertycache
114 @propertycache
115 def _checkexec(self):
115 def _checkexec(self):
116 return util.checkexec(self._root)
116 return util.checkexec(self._root)
117
117
118 @propertycache
118 @propertycache
119 def _checkcase(self):
119 def _checkcase(self):
120 return not util.checkcase(self._join('.hg'))
120 return not util.checkcase(self._join('.hg'))
121
121
122 def _join(self, f):
122 def _join(self, f):
123 # much faster than os.path.join()
123 # much faster than os.path.join()
124 # it's safe because f is always a relative path
124 # it's safe because f is always a relative path
125 return self._rootdir + f
125 return self._rootdir + f
126
126
127 def flagfunc(self, fallback):
127 def flagfunc(self, fallback):
128 if self._checklink:
128 if self._checklink:
129 if self._checkexec:
129 if self._checkexec:
130 def f(x):
130 def f(x):
131 p = self._join(x)
131 p = self._join(x)
132 if os.path.islink(p):
132 if os.path.islink(p):
133 return 'l'
133 return 'l'
134 if util.is_exec(p):
134 if util.is_exec(p):
135 return 'x'
135 return 'x'
136 return ''
136 return ''
137 return f
137 return f
138 def f(x):
138 def f(x):
139 if os.path.islink(self._join(x)):
139 if os.path.islink(self._join(x)):
140 return 'l'
140 return 'l'
141 if 'x' in fallback(x):
141 if 'x' in fallback(x):
142 return 'x'
142 return 'x'
143 return ''
143 return ''
144 return f
144 return f
145 if self._checkexec:
145 if self._checkexec:
146 def f(x):
146 def f(x):
147 if 'l' in fallback(x):
147 if 'l' in fallback(x):
148 return 'l'
148 return 'l'
149 if util.is_exec(self._join(x)):
149 if util.is_exec(self._join(x)):
150 return 'x'
150 return 'x'
151 return ''
151 return ''
152 return f
152 return f
153 return fallback
153 return fallback
154
154
155 def getcwd(self):
155 def getcwd(self):
156 cwd = os.getcwd()
156 cwd = os.getcwd()
157 if cwd == self._root: return ''
157 if cwd == self._root: return ''
158 # self._root ends with a path separator if self._root is '/' or 'C:\'
158 # self._root ends with a path separator if self._root is '/' or 'C:\'
159 rootsep = self._root
159 rootsep = self._root
160 if not util.endswithsep(rootsep):
160 if not util.endswithsep(rootsep):
161 rootsep += os.sep
161 rootsep += os.sep
162 if cwd.startswith(rootsep):
162 if cwd.startswith(rootsep):
163 return cwd[len(rootsep):]
163 return cwd[len(rootsep):]
164 else:
164 else:
165 # we're outside the repo. return an absolute path.
165 # we're outside the repo. return an absolute path.
166 return cwd
166 return cwd
167
167
168 def pathto(self, f, cwd=None):
168 def pathto(self, f, cwd=None):
169 if cwd is None:
169 if cwd is None:
170 cwd = self.getcwd()
170 cwd = self.getcwd()
171 path = util.pathto(self._root, cwd, f)
171 path = util.pathto(self._root, cwd, f)
172 if self._slash:
172 if self._slash:
173 return util.normpath(path)
173 return util.normpath(path)
174 return path
174 return path
175
175
176 def __getitem__(self, key):
176 def __getitem__(self, key):
177 '''Return the current state of key (a filename) in the dirstate.
177 '''Return the current state of key (a filename) in the dirstate.
178 States are:
178 States are:
179 n normal
179 n normal
180 m needs merging
180 m needs merging
181 r marked for removal
181 r marked for removal
182 a marked for addition
182 a marked for addition
183 ? not tracked
183 ? not tracked
184 '''
184 '''
185 return self._map.get(key, ("?",))[0]
185 return self._map.get(key, ("?",))[0]
186
186
187 def __contains__(self, key):
187 def __contains__(self, key):
188 return key in self._map
188 return key in self._map
189
189
190 def __iter__(self):
190 def __iter__(self):
191 for x in sorted(self._map):
191 for x in sorted(self._map):
192 yield x
192 yield x
193
193
194 def parents(self):
194 def parents(self):
195 return self._pl
195 return self._pl
196
196
197 def branch(self):
197 def branch(self):
198 return self._branch
198 return self._branch
199
199
200 def setparents(self, p1, p2=nullid):
200 def setparents(self, p1, p2=nullid):
201 self._dirty = self._dirtypl = True
201 self._dirty = self._dirtypl = True
202 self._pl = p1, p2
202 self._pl = p1, p2
203
203
204 def setbranch(self, branch):
204 def setbranch(self, branch):
205 if branch in ['tip', '.', 'null']:
206 raise util.Abort(_('the name \'%s\' is reserved') % branch)
205 self._branch = branch
207 self._branch = branch
206 self._opener("branch", "w").write(branch + '\n')
208 self._opener("branch", "w").write(branch + '\n')
207
209
208 def _read(self):
210 def _read(self):
209 self._map = {}
211 self._map = {}
210 self._copymap = {}
212 self._copymap = {}
211 try:
213 try:
212 st = self._opener("dirstate").read()
214 st = self._opener("dirstate").read()
213 except IOError, err:
215 except IOError, err:
214 if err.errno != errno.ENOENT: raise
216 if err.errno != errno.ENOENT: raise
215 return
217 return
216 if not st:
218 if not st:
217 return
219 return
218
220
219 p = parsers.parse_dirstate(self._map, self._copymap, st)
221 p = parsers.parse_dirstate(self._map, self._copymap, st)
220 if not self._dirtypl:
222 if not self._dirtypl:
221 self._pl = p
223 self._pl = p
222
224
223 def invalidate(self):
225 def invalidate(self):
224 for a in "_map _copymap _foldmap _branch _pl _dirs _ignore".split():
226 for a in "_map _copymap _foldmap _branch _pl _dirs _ignore".split():
225 if a in self.__dict__:
227 if a in self.__dict__:
226 delattr(self, a)
228 delattr(self, a)
227 self._dirty = False
229 self._dirty = False
228
230
229 def copy(self, source, dest):
231 def copy(self, source, dest):
230 """Mark dest as a copy of source. Unmark dest if source is None.
232 """Mark dest as a copy of source. Unmark dest if source is None.
231 """
233 """
232 if source == dest:
234 if source == dest:
233 return
235 return
234 self._dirty = True
236 self._dirty = True
235 if source is not None:
237 if source is not None:
236 self._copymap[dest] = source
238 self._copymap[dest] = source
237 elif dest in self._copymap:
239 elif dest in self._copymap:
238 del self._copymap[dest]
240 del self._copymap[dest]
239
241
240 def copied(self, file):
242 def copied(self, file):
241 return self._copymap.get(file, None)
243 return self._copymap.get(file, None)
242
244
243 def copies(self):
245 def copies(self):
244 return self._copymap
246 return self._copymap
245
247
246 def _droppath(self, f):
248 def _droppath(self, f):
247 if self[f] not in "?r" and "_dirs" in self.__dict__:
249 if self[f] not in "?r" and "_dirs" in self.__dict__:
248 _decdirs(self._dirs, f)
250 _decdirs(self._dirs, f)
249
251
250 def _addpath(self, f, check=False):
252 def _addpath(self, f, check=False):
251 oldstate = self[f]
253 oldstate = self[f]
252 if check or oldstate == "r":
254 if check or oldstate == "r":
253 if '\r' in f or '\n' in f:
255 if '\r' in f or '\n' in f:
254 raise util.Abort(
256 raise util.Abort(
255 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
257 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
256 if f in self._dirs:
258 if f in self._dirs:
257 raise util.Abort(_('directory %r already in dirstate') % f)
259 raise util.Abort(_('directory %r already in dirstate') % f)
258 # shadows
260 # shadows
259 for d in _finddirs(f):
261 for d in _finddirs(f):
260 if d in self._dirs:
262 if d in self._dirs:
261 break
263 break
262 if d in self._map and self[d] != 'r':
264 if d in self._map and self[d] != 'r':
263 raise util.Abort(
265 raise util.Abort(
264 _('file %r in dirstate clashes with %r') % (d, f))
266 _('file %r in dirstate clashes with %r') % (d, f))
265 if oldstate in "?r" and "_dirs" in self.__dict__:
267 if oldstate in "?r" and "_dirs" in self.__dict__:
266 _incdirs(self._dirs, f)
268 _incdirs(self._dirs, f)
267
269
268 def normal(self, f):
270 def normal(self, f):
269 'mark a file normal and clean'
271 'mark a file normal and clean'
270 self._dirty = True
272 self._dirty = True
271 self._addpath(f)
273 self._addpath(f)
272 s = os.lstat(self._join(f))
274 s = os.lstat(self._join(f))
273 self._map[f] = ('n', s.st_mode, s.st_size, int(s.st_mtime))
275 self._map[f] = ('n', s.st_mode, s.st_size, int(s.st_mtime))
274 if f in self._copymap:
276 if f in self._copymap:
275 del self._copymap[f]
277 del self._copymap[f]
276
278
277 def normallookup(self, f):
279 def normallookup(self, f):
278 'mark a file normal, but possibly dirty'
280 'mark a file normal, but possibly dirty'
279 if self._pl[1] != nullid and f in self._map:
281 if self._pl[1] != nullid and f in self._map:
280 # if there is a merge going on and the file was either
282 # if there is a merge going on and the file was either
281 # in state 'm' or dirty before being removed, restore that state.
283 # in state 'm' or dirty before being removed, restore that state.
282 entry = self._map[f]
284 entry = self._map[f]
283 if entry[0] == 'r' and entry[2] in (-1, -2):
285 if entry[0] == 'r' and entry[2] in (-1, -2):
284 source = self._copymap.get(f)
286 source = self._copymap.get(f)
285 if entry[2] == -1:
287 if entry[2] == -1:
286 self.merge(f)
288 self.merge(f)
287 elif entry[2] == -2:
289 elif entry[2] == -2:
288 self.normaldirty(f)
290 self.normaldirty(f)
289 if source:
291 if source:
290 self.copy(source, f)
292 self.copy(source, f)
291 return
293 return
292 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
294 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
293 return
295 return
294 self._dirty = True
296 self._dirty = True
295 self._addpath(f)
297 self._addpath(f)
296 self._map[f] = ('n', 0, -1, -1)
298 self._map[f] = ('n', 0, -1, -1)
297 if f in self._copymap:
299 if f in self._copymap:
298 del self._copymap[f]
300 del self._copymap[f]
299
301
300 def normaldirty(self, f):
302 def normaldirty(self, f):
301 'mark a file normal, but dirty'
303 'mark a file normal, but dirty'
302 self._dirty = True
304 self._dirty = True
303 self._addpath(f)
305 self._addpath(f)
304 self._map[f] = ('n', 0, -2, -1)
306 self._map[f] = ('n', 0, -2, -1)
305 if f in self._copymap:
307 if f in self._copymap:
306 del self._copymap[f]
308 del self._copymap[f]
307
309
308 def add(self, f):
310 def add(self, f):
309 'mark a file added'
311 'mark a file added'
310 self._dirty = True
312 self._dirty = True
311 self._addpath(f, True)
313 self._addpath(f, True)
312 self._map[f] = ('a', 0, -1, -1)
314 self._map[f] = ('a', 0, -1, -1)
313 if f in self._copymap:
315 if f in self._copymap:
314 del self._copymap[f]
316 del self._copymap[f]
315
317
316 def remove(self, f):
318 def remove(self, f):
317 'mark a file removed'
319 'mark a file removed'
318 self._dirty = True
320 self._dirty = True
319 self._droppath(f)
321 self._droppath(f)
320 size = 0
322 size = 0
321 if self._pl[1] != nullid and f in self._map:
323 if self._pl[1] != nullid and f in self._map:
322 entry = self._map[f]
324 entry = self._map[f]
323 if entry[0] == 'm':
325 if entry[0] == 'm':
324 size = -1
326 size = -1
325 elif entry[0] == 'n' and entry[2] == -2:
327 elif entry[0] == 'n' and entry[2] == -2:
326 size = -2
328 size = -2
327 self._map[f] = ('r', 0, size, 0)
329 self._map[f] = ('r', 0, size, 0)
328 if size == 0 and f in self._copymap:
330 if size == 0 and f in self._copymap:
329 del self._copymap[f]
331 del self._copymap[f]
330
332
331 def merge(self, f):
333 def merge(self, f):
332 'mark a file merged'
334 'mark a file merged'
333 self._dirty = True
335 self._dirty = True
334 s = os.lstat(self._join(f))
336 s = os.lstat(self._join(f))
335 self._addpath(f)
337 self._addpath(f)
336 self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
338 self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
337 if f in self._copymap:
339 if f in self._copymap:
338 del self._copymap[f]
340 del self._copymap[f]
339
341
340 def forget(self, f):
342 def forget(self, f):
341 'forget a file'
343 'forget a file'
342 self._dirty = True
344 self._dirty = True
343 try:
345 try:
344 self._droppath(f)
346 self._droppath(f)
345 del self._map[f]
347 del self._map[f]
346 except KeyError:
348 except KeyError:
347 self._ui.warn(_("not in dirstate: %s\n") % f)
349 self._ui.warn(_("not in dirstate: %s\n") % f)
348
350
349 def _normalize(self, path, knownpath):
351 def _normalize(self, path, knownpath):
350 norm_path = os.path.normcase(path)
352 norm_path = os.path.normcase(path)
351 fold_path = self._foldmap.get(norm_path, None)
353 fold_path = self._foldmap.get(norm_path, None)
352 if fold_path is None:
354 if fold_path is None:
353 if knownpath or not os.path.exists(os.path.join(self._root, path)):
355 if knownpath or not os.path.exists(os.path.join(self._root, path)):
354 fold_path = path
356 fold_path = path
355 else:
357 else:
356 fold_path = self._foldmap.setdefault(norm_path,
358 fold_path = self._foldmap.setdefault(norm_path,
357 util.fspath(path, self._root))
359 util.fspath(path, self._root))
358 return fold_path
360 return fold_path
359
361
360 def clear(self):
362 def clear(self):
361 self._map = {}
363 self._map = {}
362 if "_dirs" in self.__dict__:
364 if "_dirs" in self.__dict__:
363 delattr(self, "_dirs");
365 delattr(self, "_dirs");
364 self._copymap = {}
366 self._copymap = {}
365 self._pl = [nullid, nullid]
367 self._pl = [nullid, nullid]
366 self._dirty = True
368 self._dirty = True
367
369
368 def rebuild(self, parent, files):
370 def rebuild(self, parent, files):
369 self.clear()
371 self.clear()
370 for f in files:
372 for f in files:
371 if 'x' in files.flags(f):
373 if 'x' in files.flags(f):
372 self._map[f] = ('n', 0777, -1, 0)
374 self._map[f] = ('n', 0777, -1, 0)
373 else:
375 else:
374 self._map[f] = ('n', 0666, -1, 0)
376 self._map[f] = ('n', 0666, -1, 0)
375 self._pl = (parent, nullid)
377 self._pl = (parent, nullid)
376 self._dirty = True
378 self._dirty = True
377
379
378 def write(self):
380 def write(self):
379 if not self._dirty:
381 if not self._dirty:
380 return
382 return
381 st = self._opener("dirstate", "w", atomictemp=True)
383 st = self._opener("dirstate", "w", atomictemp=True)
382
384
383 # use the modification time of the newly created temporary file as the
385 # use the modification time of the newly created temporary file as the
384 # filesystem's notion of 'now'
386 # filesystem's notion of 'now'
385 now = int(util.fstat(st).st_mtime)
387 now = int(util.fstat(st).st_mtime)
386
388
387 cs = cStringIO.StringIO()
389 cs = cStringIO.StringIO()
388 copymap = self._copymap
390 copymap = self._copymap
389 pack = struct.pack
391 pack = struct.pack
390 write = cs.write
392 write = cs.write
391 write("".join(self._pl))
393 write("".join(self._pl))
392 for f, e in self._map.iteritems():
394 for f, e in self._map.iteritems():
393 if f in copymap:
395 if f in copymap:
394 f = "%s\0%s" % (f, copymap[f])
396 f = "%s\0%s" % (f, copymap[f])
395
397
396 if e[0] == 'n' and e[3] == now:
398 if e[0] == 'n' and e[3] == now:
397 # The file was last modified "simultaneously" with the current
399 # The file was last modified "simultaneously" with the current
398 # write to dirstate (i.e. within the same second for file-
400 # write to dirstate (i.e. within the same second for file-
399 # systems with a granularity of 1 sec). This commonly happens
401 # systems with a granularity of 1 sec). This commonly happens
400 # for at least a couple of files on 'update'.
402 # for at least a couple of files on 'update'.
401 # The user could change the file without changing its size
403 # The user could change the file without changing its size
402 # within the same second. Invalidate the file's stat data in
404 # within the same second. Invalidate the file's stat data in
403 # dirstate, forcing future 'status' calls to compare the
405 # dirstate, forcing future 'status' calls to compare the
404 # contents of the file. This prevents mistakenly treating such
406 # contents of the file. This prevents mistakenly treating such
405 # files as clean.
407 # files as clean.
406 e = (e[0], 0, -1, -1) # mark entry as 'unset'
408 e = (e[0], 0, -1, -1) # mark entry as 'unset'
407
409
408 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
410 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
409 write(e)
411 write(e)
410 write(f)
412 write(f)
411 st.write(cs.getvalue())
413 st.write(cs.getvalue())
412 st.rename()
414 st.rename()
413 self._dirty = self._dirtypl = False
415 self._dirty = self._dirtypl = False
414
416
415 def _dirignore(self, f):
417 def _dirignore(self, f):
416 if f == '.':
418 if f == '.':
417 return False
419 return False
418 if self._ignore(f):
420 if self._ignore(f):
419 return True
421 return True
420 for p in _finddirs(f):
422 for p in _finddirs(f):
421 if self._ignore(p):
423 if self._ignore(p):
422 return True
424 return True
423 return False
425 return False
424
426
425 def walk(self, match, unknown, ignored):
427 def walk(self, match, unknown, ignored):
426 '''
428 '''
427 Walk recursively through the directory tree, finding all files
429 Walk recursively through the directory tree, finding all files
428 matched by match.
430 matched by match.
429
431
430 Return a dict mapping filename to stat-like object (either
432 Return a dict mapping filename to stat-like object (either
431 mercurial.osutil.stat instance or return value of os.stat()).
433 mercurial.osutil.stat instance or return value of os.stat()).
432 '''
434 '''
433
435
434 def fwarn(f, msg):
436 def fwarn(f, msg):
435 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
437 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
436 return False
438 return False
437
439
438 def badtype(mode):
440 def badtype(mode):
439 kind = _('unknown')
441 kind = _('unknown')
440 if stat.S_ISCHR(mode): kind = _('character device')
442 if stat.S_ISCHR(mode): kind = _('character device')
441 elif stat.S_ISBLK(mode): kind = _('block device')
443 elif stat.S_ISBLK(mode): kind = _('block device')
442 elif stat.S_ISFIFO(mode): kind = _('fifo')
444 elif stat.S_ISFIFO(mode): kind = _('fifo')
443 elif stat.S_ISSOCK(mode): kind = _('socket')
445 elif stat.S_ISSOCK(mode): kind = _('socket')
444 elif stat.S_ISDIR(mode): kind = _('directory')
446 elif stat.S_ISDIR(mode): kind = _('directory')
445 return _('unsupported file type (type is %s)') % kind
447 return _('unsupported file type (type is %s)') % kind
446
448
447 ignore = self._ignore
449 ignore = self._ignore
448 dirignore = self._dirignore
450 dirignore = self._dirignore
449 if ignored:
451 if ignored:
450 ignore = util.never
452 ignore = util.never
451 dirignore = util.never
453 dirignore = util.never
452 elif not unknown:
454 elif not unknown:
453 # if unknown and ignored are False, skip step 2
455 # if unknown and ignored are False, skip step 2
454 ignore = util.always
456 ignore = util.always
455 dirignore = util.always
457 dirignore = util.always
456
458
457 matchfn = match.matchfn
459 matchfn = match.matchfn
458 badfn = match.bad
460 badfn = match.bad
459 dmap = self._map
461 dmap = self._map
460 normpath = util.normpath
462 normpath = util.normpath
461 listdir = osutil.listdir
463 listdir = osutil.listdir
462 lstat = os.lstat
464 lstat = os.lstat
463 getkind = stat.S_IFMT
465 getkind = stat.S_IFMT
464 dirkind = stat.S_IFDIR
466 dirkind = stat.S_IFDIR
465 regkind = stat.S_IFREG
467 regkind = stat.S_IFREG
466 lnkkind = stat.S_IFLNK
468 lnkkind = stat.S_IFLNK
467 join = self._join
469 join = self._join
468 work = []
470 work = []
469 wadd = work.append
471 wadd = work.append
470
472
471 if self._checkcase:
473 if self._checkcase:
472 normalize = self._normalize
474 normalize = self._normalize
473 else:
475 else:
474 normalize = lambda x, y: x
476 normalize = lambda x, y: x
475
477
476 exact = skipstep3 = False
478 exact = skipstep3 = False
477 if matchfn == match.exact: # match.exact
479 if matchfn == match.exact: # match.exact
478 exact = True
480 exact = True
479 dirignore = util.always # skip step 2
481 dirignore = util.always # skip step 2
480 elif match.files() and not match.anypats(): # match.match, no patterns
482 elif match.files() and not match.anypats(): # match.match, no patterns
481 skipstep3 = True
483 skipstep3 = True
482
484
483 files = set(match.files())
485 files = set(match.files())
484 if not files or '.' in files:
486 if not files or '.' in files:
485 files = ['']
487 files = ['']
486 results = {'.hg': None}
488 results = {'.hg': None}
487
489
488 # step 1: find all explicit files
490 # step 1: find all explicit files
489 for ff in sorted(files):
491 for ff in sorted(files):
490 nf = normalize(normpath(ff), False)
492 nf = normalize(normpath(ff), False)
491 if nf in results:
493 if nf in results:
492 continue
494 continue
493
495
494 try:
496 try:
495 st = lstat(join(nf))
497 st = lstat(join(nf))
496 kind = getkind(st.st_mode)
498 kind = getkind(st.st_mode)
497 if kind == dirkind:
499 if kind == dirkind:
498 skipstep3 = False
500 skipstep3 = False
499 if nf in dmap:
501 if nf in dmap:
500 #file deleted on disk but still in dirstate
502 #file deleted on disk but still in dirstate
501 results[nf] = None
503 results[nf] = None
502 match.dir(nf)
504 match.dir(nf)
503 if not dirignore(nf):
505 if not dirignore(nf):
504 wadd(nf)
506 wadd(nf)
505 elif kind == regkind or kind == lnkkind:
507 elif kind == regkind or kind == lnkkind:
506 results[nf] = st
508 results[nf] = st
507 else:
509 else:
508 badfn(ff, badtype(kind))
510 badfn(ff, badtype(kind))
509 if nf in dmap:
511 if nf in dmap:
510 results[nf] = None
512 results[nf] = None
511 except OSError, inst:
513 except OSError, inst:
512 if nf in dmap: # does it exactly match a file?
514 if nf in dmap: # does it exactly match a file?
513 results[nf] = None
515 results[nf] = None
514 else: # does it match a directory?
516 else: # does it match a directory?
515 prefix = nf + "/"
517 prefix = nf + "/"
516 for fn in dmap:
518 for fn in dmap:
517 if fn.startswith(prefix):
519 if fn.startswith(prefix):
518 match.dir(nf)
520 match.dir(nf)
519 skipstep3 = False
521 skipstep3 = False
520 break
522 break
521 else:
523 else:
522 badfn(ff, inst.strerror)
524 badfn(ff, inst.strerror)
523
525
524 # step 2: visit subdirectories
526 # step 2: visit subdirectories
525 while work:
527 while work:
526 nd = work.pop()
528 nd = work.pop()
527 skip = None
529 skip = None
528 if nd == '.':
530 if nd == '.':
529 nd = ''
531 nd = ''
530 else:
532 else:
531 skip = '.hg'
533 skip = '.hg'
532 try:
534 try:
533 entries = listdir(join(nd), stat=True, skip=skip)
535 entries = listdir(join(nd), stat=True, skip=skip)
534 except OSError, inst:
536 except OSError, inst:
535 if inst.errno == errno.EACCES:
537 if inst.errno == errno.EACCES:
536 fwarn(nd, inst.strerror)
538 fwarn(nd, inst.strerror)
537 continue
539 continue
538 raise
540 raise
539 for f, kind, st in entries:
541 for f, kind, st in entries:
540 nf = normalize(nd and (nd + "/" + f) or f, True)
542 nf = normalize(nd and (nd + "/" + f) or f, True)
541 if nf not in results:
543 if nf not in results:
542 if kind == dirkind:
544 if kind == dirkind:
543 if not ignore(nf):
545 if not ignore(nf):
544 match.dir(nf)
546 match.dir(nf)
545 wadd(nf)
547 wadd(nf)
546 if nf in dmap and matchfn(nf):
548 if nf in dmap and matchfn(nf):
547 results[nf] = None
549 results[nf] = None
548 elif kind == regkind or kind == lnkkind:
550 elif kind == regkind or kind == lnkkind:
549 if nf in dmap:
551 if nf in dmap:
550 if matchfn(nf):
552 if matchfn(nf):
551 results[nf] = st
553 results[nf] = st
552 elif matchfn(nf) and not ignore(nf):
554 elif matchfn(nf) and not ignore(nf):
553 results[nf] = st
555 results[nf] = st
554 elif nf in dmap and matchfn(nf):
556 elif nf in dmap and matchfn(nf):
555 results[nf] = None
557 results[nf] = None
556
558
557 # step 3: report unseen items in the dmap hash
559 # step 3: report unseen items in the dmap hash
558 if not skipstep3 and not exact:
560 if not skipstep3 and not exact:
559 visit = sorted([f for f in dmap if f not in results and matchfn(f)])
561 visit = sorted([f for f in dmap if f not in results and matchfn(f)])
560 for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
562 for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
561 if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
563 if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
562 st = None
564 st = None
563 results[nf] = st
565 results[nf] = st
564
566
565 del results['.hg']
567 del results['.hg']
566 return results
568 return results
567
569
568 def status(self, match, ignored, clean, unknown):
570 def status(self, match, ignored, clean, unknown):
569 '''Determine the status of the working copy relative to the
571 '''Determine the status of the working copy relative to the
570 dirstate and return a tuple of lists (unsure, modified, added,
572 dirstate and return a tuple of lists (unsure, modified, added,
571 removed, deleted, unknown, ignored, clean), where:
573 removed, deleted, unknown, ignored, clean), where:
572
574
573 unsure:
575 unsure:
574 files that might have been modified since the dirstate was
576 files that might have been modified since the dirstate was
575 written, but need to be read to be sure (size is the same
577 written, but need to be read to be sure (size is the same
576 but mtime differs)
578 but mtime differs)
577 modified:
579 modified:
578 files that have definitely been modified since the dirstate
580 files that have definitely been modified since the dirstate
579 was written (different size or mode)
581 was written (different size or mode)
580 added:
582 added:
581 files that have been explicitly added with hg add
583 files that have been explicitly added with hg add
582 removed:
584 removed:
583 files that have been explicitly removed with hg remove
585 files that have been explicitly removed with hg remove
584 deleted:
586 deleted:
585 files that have been deleted through other means ("missing")
587 files that have been deleted through other means ("missing")
586 unknown:
588 unknown:
587 files not in the dirstate that are not ignored
589 files not in the dirstate that are not ignored
588 ignored:
590 ignored:
589 files not in the dirstate that are ignored
591 files not in the dirstate that are ignored
590 (by _dirignore())
592 (by _dirignore())
591 clean:
593 clean:
592 files that have definitely not been modified since the
594 files that have definitely not been modified since the
593 dirstate was written
595 dirstate was written
594 '''
596 '''
595 listignored, listclean, listunknown = ignored, clean, unknown
597 listignored, listclean, listunknown = ignored, clean, unknown
596 lookup, modified, added, unknown, ignored = [], [], [], [], []
598 lookup, modified, added, unknown, ignored = [], [], [], [], []
597 removed, deleted, clean = [], [], []
599 removed, deleted, clean = [], [], []
598
600
599 dmap = self._map
601 dmap = self._map
600 ladd = lookup.append # aka "unsure"
602 ladd = lookup.append # aka "unsure"
601 madd = modified.append
603 madd = modified.append
602 aadd = added.append
604 aadd = added.append
603 uadd = unknown.append
605 uadd = unknown.append
604 iadd = ignored.append
606 iadd = ignored.append
605 radd = removed.append
607 radd = removed.append
606 dadd = deleted.append
608 dadd = deleted.append
607 cadd = clean.append
609 cadd = clean.append
608
610
609 for fn, st in self.walk(match, listunknown, listignored).iteritems():
611 for fn, st in self.walk(match, listunknown, listignored).iteritems():
610 if fn not in dmap:
612 if fn not in dmap:
611 if (listignored or match.exact(fn)) and self._dirignore(fn):
613 if (listignored or match.exact(fn)) and self._dirignore(fn):
612 if listignored:
614 if listignored:
613 iadd(fn)
615 iadd(fn)
614 elif listunknown:
616 elif listunknown:
615 uadd(fn)
617 uadd(fn)
616 continue
618 continue
617
619
618 state, mode, size, time = dmap[fn]
620 state, mode, size, time = dmap[fn]
619
621
620 if not st and state in "nma":
622 if not st and state in "nma":
621 dadd(fn)
623 dadd(fn)
622 elif state == 'n':
624 elif state == 'n':
623 if (size >= 0 and
625 if (size >= 0 and
624 (size != st.st_size
626 (size != st.st_size
625 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
627 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
626 or size == -2
628 or size == -2
627 or fn in self._copymap):
629 or fn in self._copymap):
628 madd(fn)
630 madd(fn)
629 elif time != int(st.st_mtime):
631 elif time != int(st.st_mtime):
630 ladd(fn)
632 ladd(fn)
631 elif listclean:
633 elif listclean:
632 cadd(fn)
634 cadd(fn)
633 elif state == 'm':
635 elif state == 'm':
634 madd(fn)
636 madd(fn)
635 elif state == 'a':
637 elif state == 'a':
636 aadd(fn)
638 aadd(fn)
637 elif state == 'r':
639 elif state == 'r':
638 radd(fn)
640 radd(fn)
639
641
640 return (lookup, modified, added, removed, deleted, unknown, ignored,
642 return (lookup, modified, added, removed, deleted, unknown, ignored,
641 clean)
643 clean)
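
On the working-copy side, dirstate.setbranch() gains the matching guard, so 'hg branch' refuses a reserved name before anything is written to .hg/branch. A hedged sketch of that guard (util.Abort is Mercurial's user-facing error; a plain RuntimeError stands in for it here, and 'write' stands in for the opener):

    RESERVED = ('tip', '.', 'null')

    def setbranch(branch, write):
        # mirrors the check added to dirstate.setbranch() above
        if branch in RESERVED:
            # dirstate raises util.Abort(_("the name '%s' is reserved") % branch)
            raise RuntimeError("the name '%s' is reserved" % branch)
        write(branch + '\n')   # Mercurial stores the name in .hg/branch

    setbranch('stable', lambda data: None)   # accepted
    # setbranch('tip', lambda data: None)    # would abort: the name 'tip' is reserved
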
@@ -1,90 +1,94 @@
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init a
3 hg init a
4 cd a
4 cd a
5 echo 'root' >root
5 echo 'root' >root
6 hg add root
6 hg add root
7 hg commit -d '0 0' -m "Adding root node"
7 hg commit -d '0 0' -m "Adding root node"
8
8
9 echo 'a' >a
9 echo 'a' >a
10 hg add a
10 hg add a
11 hg branch a
11 hg branch a
12 hg commit -d '1 0' -m "Adding a branch"
12 hg commit -d '1 0' -m "Adding a branch"
13
13
14 hg branch q
14 hg branch q
15 echo 'aa' >a
15 echo 'aa' >a
16 hg branch -C
16 hg branch -C
17 hg commit -d '2 0' -m "Adding to a branch"
17 hg commit -d '2 0' -m "Adding to a branch"
18
18
19 hg update -C 0
19 hg update -C 0
20 echo 'b' >b
20 echo 'b' >b
21 hg add b
21 hg add b
22 hg branch b
22 hg branch b
23 hg commit -d '2 0' -m "Adding b branch"
23 hg commit -d '2 0' -m "Adding b branch"
24
24
25 echo 'bh1' >bh1
25 echo 'bh1' >bh1
26 hg add bh1
26 hg add bh1
27 hg commit -d '3 0' -m "Adding b branch head 1"
27 hg commit -d '3 0' -m "Adding b branch head 1"
28
28
29 hg update -C 2
29 hg update -C 2
30 echo 'bh2' >bh2
30 echo 'bh2' >bh2
31 hg add bh2
31 hg add bh2
32 hg commit -d '4 0' -m "Adding b branch head 2"
32 hg commit -d '4 0' -m "Adding b branch head 2"
33
33
34 echo 'c' >c
34 echo 'c' >c
35 hg add c
35 hg add c
36 hg branch c
36 hg branch c
37 hg commit -d '5 0' -m "Adding c branch"
37 hg commit -d '5 0' -m "Adding c branch"
38
38
39 hg branch tip
40 hg branch null
41 hg branch .
42
39 echo 'd' >d
43 echo 'd' >d
40 hg add d
44 hg add d
41 hg branch 'a branch name much longer than the default justification used by branches'
45 hg branch 'a branch name much longer than the default justification used by branches'
42 hg commit -d '6 0' -m "Adding d branch"
46 hg commit -d '6 0' -m "Adding d branch"
43
47
44 hg branches
48 hg branches
45 echo '-------'
49 echo '-------'
46 hg branches -a
50 hg branches -a
47
51
48 echo "--- Branch a"
52 echo "--- Branch a"
49 hg log -b a
53 hg log -b a
50
54
51 echo "---- Branch b"
55 echo "---- Branch b"
52 hg log -b b
56 hg log -b b
53
57
54 echo "---- going to test branch closing"
58 echo "---- going to test branch closing"
55 hg branches
59 hg branches
56 hg up -C b
60 hg up -C b
57 echo 'xxx1' >> b
61 echo 'xxx1' >> b
58 hg commit -d '7 0' -m 'adding cset to branch b'
62 hg commit -d '7 0' -m 'adding cset to branch b'
59 hg up -C aee39cd168d0
63 hg up -C aee39cd168d0
60 echo 'xxx2' >> b
64 echo 'xxx2' >> b
61 hg commit -d '8 0' -m 'adding head to branch b'
65 hg commit -d '8 0' -m 'adding head to branch b'
62 echo 'xxx3' >> b
66 echo 'xxx3' >> b
63 hg commit -d '9 0' -m 'adding another cset to branch b'
67 hg commit -d '9 0' -m 'adding another cset to branch b'
64 hg branches
68 hg branches
65 hg heads --closed
69 hg heads --closed
66 hg heads
70 hg heads
67 hg commit -d '9 0' --close-branch -m 'prune bad branch'
71 hg commit -d '9 0' --close-branch -m 'prune bad branch'
68 hg branches -a
72 hg branches -a
69 hg up -C b
73 hg up -C b
70 hg commit -d '9 0' --close-branch -m 'close this part branch too'
74 hg commit -d '9 0' --close-branch -m 'close this part branch too'
71 echo '--- b branch should be inactive'
75 echo '--- b branch should be inactive'
72 hg branches
76 hg branches
73 hg branches -c
77 hg branches -c
74 hg branches -a
78 hg branches -a
75 hg heads b
79 hg heads b
76 hg heads --closed b
80 hg heads --closed b
77 echo 'xxx4' >> b
81 echo 'xxx4' >> b
78 hg commit -d '9 0' -m 'reopen branch with a change'
82 hg commit -d '9 0' -m 'reopen branch with a change'
79 echo '--- branch b is back in action'
83 echo '--- branch b is back in action'
80 hg branches -a
84 hg branches -a
81 echo '---- test heads listings'
85 echo '---- test heads listings'
82 hg heads
86 hg heads
83 echo '% branch default'
87 echo '% branch default'
84 hg heads default
88 hg heads default
85 echo '% branch a'
89 echo '% branch a'
86 hg heads a
90 hg heads a
87 hg heads --active a
91 hg heads --active a
88 echo '% branch b'
92 echo '% branch b'
89 hg heads b
93 hg heads b
90 hg heads --closed b
94 hg heads --closed b
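
The three new script lines ('hg branch tip', 'hg branch null', 'hg branch .') exercise the dirstate guard, and the expected-output hunk below gains the three matching abort messages. A small, hypothetical harness (not part of Mercurial's test suite) that asserts the same behaviour, assuming it is run inside a test repository with this fix applied:

    import subprocess

    for name in ('tip', 'null', '.'):
        p = subprocess.Popen(['hg', 'branch', name],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        # each reserved name must be refused with the message shown in the diff
        assert p.returncode != 0
        assert ("the name '%s' is reserved" % name) in err.decode()
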
@@ -1,173 +1,176 @@
1 marked working directory as branch a
1 marked working directory as branch a
2 marked working directory as branch q
2 marked working directory as branch q
3 reset working directory to branch a
3 reset working directory to branch a
4 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
4 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
5 marked working directory as branch b
5 marked working directory as branch b
6 created new head
6 created new head
7 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
7 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
8 marked working directory as branch c
8 marked working directory as branch c
9 abort: the name 'tip' is reserved
10 abort: the name 'null' is reserved
11 abort: the name '.' is reserved
9 marked working directory as branch a branch name much longer than the default justification used by branches
12 marked working directory as branch a branch name much longer than the default justification used by branches
10 a branch name much longer than the default justification used by branches 7:10ff5895aa57
13 a branch name much longer than the default justification used by branches 7:10ff5895aa57
11 b 4:aee39cd168d0
14 b 4:aee39cd168d0
12 c 6:589736a22561 (inactive)
15 c 6:589736a22561 (inactive)
13 a 5:d8cbc61dbaa6 (inactive)
16 a 5:d8cbc61dbaa6 (inactive)
14 default 0:19709c5a4e75 (inactive)
17 default 0:19709c5a4e75 (inactive)
15 -------
18 -------
16 a branch name much longer than the default justification used by branches 7:10ff5895aa57
19 a branch name much longer than the default justification used by branches 7:10ff5895aa57
17 b 4:aee39cd168d0
20 b 4:aee39cd168d0
18 --- Branch a
21 --- Branch a
19 changeset: 5:d8cbc61dbaa6
22 changeset: 5:d8cbc61dbaa6
20 branch: a
23 branch: a
21 parent: 2:881fe2b92ad0
24 parent: 2:881fe2b92ad0
22 user: test
25 user: test
23 date: Thu Jan 01 00:00:04 1970 +0000
26 date: Thu Jan 01 00:00:04 1970 +0000
24 summary: Adding b branch head 2
27 summary: Adding b branch head 2
25
28
26 changeset: 2:881fe2b92ad0
29 changeset: 2:881fe2b92ad0
27 branch: a
30 branch: a
28 user: test
31 user: test
29 date: Thu Jan 01 00:00:02 1970 +0000
32 date: Thu Jan 01 00:00:02 1970 +0000
30 summary: Adding to a branch
33 summary: Adding to a branch
31
34
32 changeset: 1:dd6b440dd85a
35 changeset: 1:dd6b440dd85a
33 branch: a
36 branch: a
34 user: test
37 user: test
35 date: Thu Jan 01 00:00:01 1970 +0000
38 date: Thu Jan 01 00:00:01 1970 +0000
36 summary: Adding a branch
39 summary: Adding a branch
37
40
38 ---- Branch b
41 ---- Branch b
39 changeset: 4:aee39cd168d0
42 changeset: 4:aee39cd168d0
40 branch: b
43 branch: b
41 user: test
44 user: test
42 date: Thu Jan 01 00:00:03 1970 +0000
45 date: Thu Jan 01 00:00:03 1970 +0000
43 summary: Adding b branch head 1
46 summary: Adding b branch head 1
44
47
45 changeset: 3:ac22033332d1
48 changeset: 3:ac22033332d1
46 branch: b
49 branch: b
47 parent: 0:19709c5a4e75
50 parent: 0:19709c5a4e75
48 user: test
51 user: test
49 date: Thu Jan 01 00:00:02 1970 +0000
52 date: Thu Jan 01 00:00:02 1970 +0000
50 summary: Adding b branch
53 summary: Adding b branch
51
54
52 ---- going to test branch closing
55 ---- going to test branch closing
53 a branch name much longer than the default justification used by branches 7:10ff5895aa57
56 a branch name much longer than the default justification used by branches 7:10ff5895aa57
54 b 4:aee39cd168d0
57 b 4:aee39cd168d0
55 c 6:589736a22561 (inactive)
58 c 6:589736a22561 (inactive)
56 a 5:d8cbc61dbaa6 (inactive)
59 a 5:d8cbc61dbaa6 (inactive)
57 default 0:19709c5a4e75 (inactive)
60 default 0:19709c5a4e75 (inactive)
58 2 files updated, 0 files merged, 4 files removed, 0 files unresolved
61 2 files updated, 0 files merged, 4 files removed, 0 files unresolved
59 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
62 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
60 created new head
63 created new head
61 b 10:bfbe841b666e
64 b 10:bfbe841b666e
62 a branch name much longer than the default justification used by branches 7:10ff5895aa57
65 a branch name much longer than the default justification used by branches 7:10ff5895aa57
63 c 6:589736a22561 (inactive)
66 c 6:589736a22561 (inactive)
64 a 5:d8cbc61dbaa6 (inactive)
67 a 5:d8cbc61dbaa6 (inactive)
65 default 0:19709c5a4e75 (inactive)
68 default 0:19709c5a4e75 (inactive)
66 abort: you must specify a branch to use --closed
69 abort: you must specify a branch to use --closed
67 changeset: 10:bfbe841b666e
70 changeset: 10:bfbe841b666e
68 branch: b
71 branch: b
69 tag: tip
72 tag: tip
70 user: test
73 user: test
71 date: Thu Jan 01 00:00:09 1970 +0000
74 date: Thu Jan 01 00:00:09 1970 +0000
72 summary: adding another cset to branch b
75 summary: adding another cset to branch b
73
76
74 changeset: 8:eebb944467c9
77 changeset: 8:eebb944467c9
75 branch: b
78 branch: b
76 parent: 4:aee39cd168d0
79 parent: 4:aee39cd168d0
77 user: test
80 user: test
78 date: Thu Jan 01 00:00:07 1970 +0000
81 date: Thu Jan 01 00:00:07 1970 +0000
79 summary: adding cset to branch b
82 summary: adding cset to branch b
80
83
81 changeset: 7:10ff5895aa57
84 changeset: 7:10ff5895aa57
82 branch: a branch name much longer than the default justification used by branches
85 branch: a branch name much longer than the default justification used by branches
83 user: test
86 user: test
84 date: Thu Jan 01 00:00:06 1970 +0000
87 date: Thu Jan 01 00:00:06 1970 +0000
85 summary: Adding d branch
88 summary: Adding d branch
86
89
87 b 8:eebb944467c9
90 b 8:eebb944467c9
88 a branch name much longer than the default justification used by branches 7:10ff5895aa57
91 a branch name much longer than the default justification used by branches 7:10ff5895aa57
89 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
92 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
90 --- b branch should be inactive
93 --- b branch should be inactive
91 a branch name much longer than the default justification used by branches 7:10ff5895aa57
94 a branch name much longer than the default justification used by branches 7:10ff5895aa57
92 c 6:589736a22561 (inactive)
95 c 6:589736a22561 (inactive)
93 a 5:d8cbc61dbaa6 (inactive)
96 a 5:d8cbc61dbaa6 (inactive)
94 default 0:19709c5a4e75 (inactive)
97 default 0:19709c5a4e75 (inactive)
95 a branch name much longer than the default justification used by branches 7:10ff5895aa57
98 a branch name much longer than the default justification used by branches 7:10ff5895aa57
96 b 12:2da6583810df (closed)
99 b 12:2da6583810df (closed)
97 c 6:589736a22561 (inactive)
100 c 6:589736a22561 (inactive)
98 a 5:d8cbc61dbaa6 (inactive)
101 a 5:d8cbc61dbaa6 (inactive)
99 default 0:19709c5a4e75 (inactive)
102 default 0:19709c5a4e75 (inactive)
100 a branch name much longer than the default justification used by branches 7:10ff5895aa57
103 a branch name much longer than the default justification used by branches 7:10ff5895aa57
101 no open branch heads on branch b
104 no open branch heads on branch b
102 changeset: 12:2da6583810df
105 changeset: 12:2da6583810df
103 branch: b
106 branch: b
104 tag: tip
107 tag: tip
105 parent: 8:eebb944467c9
108 parent: 8:eebb944467c9
106 user: test
109 user: test
107 date: Thu Jan 01 00:00:09 1970 +0000
110 date: Thu Jan 01 00:00:09 1970 +0000
108 summary: close this part branch too
111 summary: close this part branch too
109
112
110 changeset: 11:c84627f3c15d
113 changeset: 11:c84627f3c15d
111 branch: b
114 branch: b
112 user: test
115 user: test
113 date: Thu Jan 01 00:00:09 1970 +0000
116 date: Thu Jan 01 00:00:09 1970 +0000
114 summary: prune bad branch
117 summary: prune bad branch
115
118
116 --- branch b is back in action
119 --- branch b is back in action
117 b 13:6ac12926b8c3
120 b 13:6ac12926b8c3
118 a branch name much longer than the default justification used by branches 7:10ff5895aa57
121 a branch name much longer than the default justification used by branches 7:10ff5895aa57
119 ---- test heads listings
122 ---- test heads listings
120 changeset: 13:6ac12926b8c3
123 changeset: 13:6ac12926b8c3
121 branch: b
124 branch: b
122 tag: tip
125 tag: tip
123 user: test
126 user: test
124 date: Thu Jan 01 00:00:09 1970 +0000
127 date: Thu Jan 01 00:00:09 1970 +0000
125 summary: reopen branch with a change
128 summary: reopen branch with a change
126
129
127 changeset: 11:c84627f3c15d
130 changeset: 11:c84627f3c15d
128 branch: b
131 branch: b
129 user: test
132 user: test
130 date: Thu Jan 01 00:00:09 1970 +0000
133 date: Thu Jan 01 00:00:09 1970 +0000
131 summary: prune bad branch
134 summary: prune bad branch
132
135
133 changeset: 7:10ff5895aa57
136 changeset: 7:10ff5895aa57
134 branch: a branch name much longer than the default justification used by branches
137 branch: a branch name much longer than the default justification used by branches
135 user: test
138 user: test
136 date: Thu Jan 01 00:00:06 1970 +0000
139 date: Thu Jan 01 00:00:06 1970 +0000
137 summary: Adding d branch
140 summary: Adding d branch
138
141
139 % branch default
142 % branch default
140 changeset: 0:19709c5a4e75
143 changeset: 0:19709c5a4e75
141 user: test
144 user: test
142 date: Thu Jan 01 00:00:00 1970 +0000
145 date: Thu Jan 01 00:00:00 1970 +0000
143 summary: Adding root node
146 summary: Adding root node
144
147
145 % branch a
148 % branch a
146 changeset: 5:d8cbc61dbaa6
149 changeset: 5:d8cbc61dbaa6
147 branch: a
150 branch: a
148 parent: 2:881fe2b92ad0
151 parent: 2:881fe2b92ad0
149 user: test
152 user: test
150 date: Thu Jan 01 00:00:04 1970 +0000
153 date: Thu Jan 01 00:00:04 1970 +0000
151 summary: Adding b branch head 2
154 summary: Adding b branch head 2
152
155
153 % branch b
156 % branch b
154 changeset: 13:6ac12926b8c3
157 changeset: 13:6ac12926b8c3
155 branch: b
158 branch: b
156 tag: tip
159 tag: tip
157 user: test
160 user: test
158 date: Thu Jan 01 00:00:09 1970 +0000
161 date: Thu Jan 01 00:00:09 1970 +0000
159 summary: reopen branch with a change
162 summary: reopen branch with a change
160
163
161 changeset: 13:6ac12926b8c3
164 changeset: 13:6ac12926b8c3
162 branch: b
165 branch: b
163 tag: tip
166 tag: tip
164 user: test
167 user: test
165 date: Thu Jan 01 00:00:09 1970 +0000
168 date: Thu Jan 01 00:00:09 1970 +0000
166 summary: reopen branch with a change
169 summary: reopen branch with a change
167
170
168 changeset: 11:c84627f3c15d
171 changeset: 11:c84627f3c15d
169 branch: b
172 branch: b
170 user: test
173 user: test
171 date: Thu Jan 01 00:00:09 1970 +0000
174 date: Thu Jan 01 00:00:09 1970 +0000
172 summary: prune bad branch
175 summary: prune bad branch
173
176