@@ -1,228 +1,233 @@
|
1 | 1 | # changelog.py - changelog class for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from node import bin, hex, nullid |
|
9 | 9 | from i18n import _ |
|
10 | 10 | import util, error, revlog, encoding |
|
11 | 11 | |
|
12 | 12 | def _string_escape(text): |
|
13 | 13 | """ |
|
14 | 14 | >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)} |
|
15 | 15 | >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d |
|
16 | 16 | >>> s |
|
17 | 17 | 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n' |
|
18 | 18 | >>> res = _string_escape(s) |
|
19 | 19 | >>> s == res.decode('string_escape') |
|
20 | 20 | True |
|
21 | 21 | """ |
|
22 | 22 | # subset of the string_escape codec |
|
23 | 23 | text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r') |
|
24 | 24 | return text.replace('\0', '\\0') |
|
25 | 25 | |
|
26 | 26 | def decodeextra(text): |
|
27 | 27 | extra = {} |
|
28 | 28 | for l in text.split('\0'): |
|
29 | 29 | if l: |
|
30 | 30 | k, v = l.decode('string_escape').split(':', 1) |
|
31 | 31 | extra[k] = v |
|
32 | 32 | return extra |
|
33 | 33 | |
|
34 | 34 | def encodeextra(d): |
|
35 | 35 | # keys must be sorted to produce a deterministic changelog entry |
|
36 | 36 | items = [_string_escape('%s:%s' % (k, d[k])) for k in sorted(d)] |
|
37 | 37 | return "\0".join(items) |
|
38 | 38 | |
|
39 | 39 | class appender(object): |
|
40 | 40 | '''the changelog index must be updated last on disk, so we use this class |
|
41 | 41 | to delay writes to it''' |
|
42 | 42 | def __init__(self, fp, buf): |
|
43 | 43 | self.data = buf |
|
44 | 44 | self.fp = fp |
|
45 | 45 | self.offset = fp.tell() |
|
46 | 46 | self.size = util.fstat(fp).st_size |
|
47 | 47 | |
|
48 | 48 | def end(self): |
|
49 | 49 | return self.size + len("".join(self.data)) |
|
50 | 50 | def tell(self): |
|
51 | 51 | return self.offset |
|
52 | 52 | def flush(self): |
|
53 | 53 | pass |
|
54 | 54 | def close(self): |
|
55 | 55 | self.fp.close() |
|
56 | 56 | |
|
57 | 57 | def seek(self, offset, whence=0): |
|
58 | 58 | '''virtual file offset spans real file and data''' |
|
59 | 59 | if whence == 0: |
|
60 | 60 | self.offset = offset |
|
61 | 61 | elif whence == 1: |
|
62 | 62 | self.offset += offset |
|
63 | 63 | elif whence == 2: |
|
64 | 64 | self.offset = self.end() + offset |
|
65 | 65 | if self.offset < self.size: |
|
66 | 66 | self.fp.seek(self.offset) |
|
67 | 67 | |
|
68 | 68 | def read(self, count=-1): |
|
69 | 69 | '''only trick here is reads that span real file and data''' |
|
70 | 70 | ret = "" |
|
71 | 71 | if self.offset < self.size: |
|
72 | 72 | s = self.fp.read(count) |
|
73 | 73 | ret = s |
|
74 | 74 | self.offset += len(s) |
|
75 | 75 | if count > 0: |
|
76 | 76 | count -= len(s) |
|
77 | 77 | if count != 0: |
|
78 | 78 | doff = self.offset - self.size |
|
79 | 79 | self.data.insert(0, "".join(self.data)) |
|
80 | 80 | del self.data[1:] |
|
81 | 81 | s = self.data[0][doff:doff+count] |
|
82 | 82 | self.offset += len(s) |
|
83 | 83 | ret += s |
|
84 | 84 | return ret |
|
85 | 85 | |
|
86 | 86 | def write(self, s): |
|
87 | 87 | self.data.append(str(s)) |
|
88 | 88 | self.offset += len(s) |
|
89 | 89 | |
|
90 | 90 | def delayopener(opener, target, divert, buf): |
|
91 | 91 | def o(name, mode='r'): |
|
92 | 92 | if name != target: |
|
93 | 93 | return opener(name, mode) |
|
94 | 94 | if divert: |
|
95 | 95 | return opener(name + ".a", mode.replace('a', 'w')) |
|
96 | 96 | # otherwise, divert to memory |
|
97 | 97 | return appender(opener(name, mode), buf) |
|
98 | 98 | return o |
|
99 | 99 | |
|
100 | 100 | class changelog(revlog.revlog): |
|
101 | 101 | def __init__(self, opener): |
|
102 | 102 | revlog.revlog.__init__(self, opener, "00changelog.i") |
|
103 | 103 | self._realopener = opener |
|
104 | 104 | self._delayed = False |
|
105 | 105 | self._divert = False |
|
106 | 106 | |
|
107 | 107 | def delayupdate(self): |
|
108 | 108 | "delay visibility of index updates to other readers" |
|
109 | 109 | self._delayed = True |
|
110 | 110 | self._divert = (len(self) == 0) |
|
111 | 111 | self._delaybuf = [] |
|
112 | 112 | self.opener = delayopener(self._realopener, self.indexfile, |
|
113 | 113 | self._divert, self._delaybuf) |
|
114 | 114 | |
|
115 | 115 | def finalize(self, tr): |
|
116 | 116 | "finalize index updates" |
|
117 | 117 | self._delayed = False |
|
118 | 118 | self.opener = self._realopener |
|
119 | 119 | # move redirected index data back into place |
|
120 | 120 | if self._divert: |
|
121 | 121 | n = self.opener(self.indexfile + ".a").name |
|
122 | 122 | util.rename(n, n[:-2]) |
|
123 | 123 | elif self._delaybuf: |
|
124 | 124 | fp = self.opener(self.indexfile, 'a') |
|
125 | 125 | fp.write("".join(self._delaybuf)) |
|
126 | 126 | fp.close() |
|
127 | 127 | self._delaybuf = [] |
|
128 | 128 | # split when we're done |
|
129 | 129 | self.checkinlinesize(tr) |
|
130 | 130 | |
|
131 | 131 | def readpending(self, file): |
|
132 | 132 | r = revlog.revlog(self.opener, file) |
|
133 | 133 | self.index = r.index |
|
134 | 134 | self.nodemap = r.nodemap |
|
135 | 135 | self._chunkcache = r._chunkcache |
|
136 | 136 | |
|
137 | 137 | def writepending(self): |
|
138 | 138 | "create a file containing the unfinalized state for pretxnchangegroup" |
|
139 | 139 | if self._delaybuf: |
|
140 | 140 | # make a temporary copy of the index |
|
141 | 141 | fp1 = self._realopener(self.indexfile) |
|
142 | 142 | fp2 = self._realopener(self.indexfile + ".a", "w") |
|
143 | 143 | fp2.write(fp1.read()) |
|
144 | 144 | # add pending data |
|
145 | 145 | fp2.write("".join(self._delaybuf)) |
|
146 | 146 | fp2.close() |
|
147 | 147 | # switch modes so finalize can simply rename |
|
148 | 148 | self._delaybuf = [] |
|
149 | 149 | self._divert = True |
|
150 | 150 | |
|
151 | 151 | if self._divert: |
|
152 | 152 | return True |
|
153 | 153 | |
|
154 | 154 | return False |
|
155 | 155 | |
|
156 | 156 | def checkinlinesize(self, tr, fp=None): |
|
157 | 157 | if not self._delayed: |
|
158 | 158 | revlog.revlog.checkinlinesize(self, tr, fp) |
|
159 | 159 | |
|
160 | 160 | def read(self, node): |
|
161 | 161 | """ |
|
162 | 162 | format used: |
|
163 | 163 | nodeid\n : manifest node in ascii |
|
164 | 164 | user\n : user, no \n or \r allowed |
|
165 | 165 | time tz extra\n : date (time is int or float, timezone is int) |
|
166 | 166 | : extra is metadatas, encoded and separated by '\0' |
|
167 | 167 | : older versions ignore it |
|
168 | 168 | files\n\n : files modified by the cset, no \n or \r allowed |
|
169 | 169 | (.*) : comment (free text, ideally utf-8) |
|
170 | 170 | |
|
171 | 171 | changelog v0 doesn't use extra |
|
172 | 172 | """ |
|
173 | 173 | text = self.revision(node) |
|
174 | 174 | if not text: |
|
175 | 175 | return (nullid, "", (0, 0), [], "", {'branch': 'default'}) |
|
176 | 176 | last = text.index("\n\n") |
|
177 | 177 | desc = encoding.tolocal(text[last + 2:]) |
|
178 | 178 | l = text[:last].split('\n') |
|
179 | 179 | manifest = bin(l[0]) |
|
180 | 180 | user = encoding.tolocal(l[1]) |
|
181 | 181 | |
|
182 | 182 | extra_data = l[2].split(' ', 2) |
|
183 | 183 | if len(extra_data) != 3: |
|
184 | 184 | time = float(extra_data.pop(0)) |
|
185 | 185 | try: |
|
186 | 186 | # various tools did silly things with the time zone field. |
|
187 | 187 | timezone = int(extra_data[0]) |
|
188 | 188 | except: |
|
189 | 189 | timezone = 0 |
|
190 | 190 | extra = {} |
|
191 | 191 | else: |
|
192 | 192 | time, timezone, extra = extra_data |
|
193 | 193 | time, timezone = float(time), int(timezone) |
|
194 | 194 | extra = decodeextra(extra) |
|
195 | 195 | if not extra.get('branch'): |
|
196 | 196 | extra['branch'] = 'default' |
|
197 | 197 | files = l[3:] |
|
198 | 198 | return (manifest, user, (time, timezone), files, desc, extra) |
|
199 | 199 | |
|
200 | 200 | def add(self, manifest, files, desc, transaction, p1, p2, |
|
201 | 201 | user, date=None, extra=None): |
|
202 | 202 | user = user.strip() |
|
203 | 203 | # An empty username or a username with a "\n" will make the |
|
204 | 204 | # revision text contain two "\n\n" sequences -> corrupt |
|
205 | 205 | # repository since read cannot unpack the revision. |
|
206 | 206 | if not user: |
|
207 | 207 | raise error.RevlogError(_("empty username")) |
|
208 | 208 | if "\n" in user: |
|
209 | 209 | raise error.RevlogError(_("username %s contains a newline") |
|
210 | 210 | % repr(user)) |
|
211 | 211 | |
|
212 | 212 | # strip trailing whitespace and leading and trailing empty lines |
|
213 | 213 | desc = '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n') |
|
214 | 214 | |
|
215 | 215 | user, desc = encoding.fromlocal(user), encoding.fromlocal(desc) |
|
216 | 216 | |
|
217 | 217 | if date: |
|
218 | 218 | parseddate = "%d %d" % util.parsedate(date) |
|
219 | 219 | else: |
|
220 | 220 | parseddate = "%d %d" % util.makedate() |
|
221 | if extra and extra.get("branch") in ("default", ""): | |
|
222 | del extra["branch"] | |
|
221 | if extra: | |
|
222 | branch = extra.get("branch") | |
|
223 | if branch in ("default", ""): | |
|
224 | del extra["branch"] | |
|
225 | elif branch in (".", "null", "tip"): | |
|
226 | raise error.RevlogError(_('the name \'%s\' is reserved') | |
|
227 | % branch) | |
|
223 | 228 | if extra: |
|
224 | 229 | extra = encodeextra(extra) |
|
225 | 230 | parseddate = "%s %s" % (parseddate, extra) |
|
226 | 231 | l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc] |
|
227 | 232 | text = "\n".join(l) |
|
228 | 233 | return self.addrevision(text, transaction, len(self), p1, p2) |
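Reviewer note: this hunk is the commit-level half of the change. changelog.add() now inspects extra['branch'] before the entry is serialized: the implicit 'default' branch is still dropped, and the names '.', 'null' and 'tip' are newly refused with a RevlogError because they already act as revision identifiers. The sketch below is a standalone illustration of that normalization together with the "time tz extra" date line documented in read()'s docstring; the helper names are invented for the example, escaping via _string_escape is omitted, and this is not the revlog code itself.

    RESERVED = ('.', 'null', 'tip')            # taken from the hunk above

    def normalizebranch(extra):
        """Drop an implicit 'default' branch, refuse reserved names."""
        extra = dict(extra)
        branch = extra.get('branch')
        if branch in ('default', ''):
            extra.pop('branch', None)          # never stored explicitly
        elif branch in RESERVED:
            raise ValueError("the name '%s' is reserved" % branch)
        return extra

    def dateline(time, tz, extra):
        """Build the entry's third line: 'time tz' plus encoded extras."""
        line = '%d %d' % (time, tz)
        if extra:
            items = ['%s:%s' % (k, extra[k]) for k in sorted(extra)]
            line = '%s %s' % (line, '\0'.join(items))
        return line

    assert normalizebranch({'branch': 'default'}) == {}
    assert dateline(0, 0, normalizebranch({'branch': 'stable'})) == '0 0 branch:stable'
    try:
        normalizebranch({'branch': 'tip'})
    except ValueError:
        pass                                   # rejected, like the new elif above

Checking here as well as in dirstate.setbranch() (next file) presumably also covers callers that reach add() without going through 'hg branch', e.g. working directories created before this change.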
@@ -1,641 +1,643 @@
|
1 | 1 | # dirstate.py - working directory tracking for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from node import nullid |
|
9 | 9 | from i18n import _ |
|
10 | 10 | import util, ignore, osutil, parsers |
|
11 | 11 | import struct, os, stat, errno |
|
12 | 12 | import cStringIO |
|
13 | 13 | |
|
14 | 14 | _unknown = ('?', 0, 0, 0) |
|
15 | 15 | _format = ">cllll" |
|
16 | 16 | propertycache = util.propertycache |
|
17 | 17 | |
|
18 | 18 | def _finddirs(path): |
|
19 | 19 | pos = path.rfind('/') |
|
20 | 20 | while pos != -1: |
|
21 | 21 | yield path[:pos] |
|
22 | 22 | pos = path.rfind('/', 0, pos) |
|
23 | 23 | |
|
24 | 24 | def _incdirs(dirs, path): |
|
25 | 25 | for base in _finddirs(path): |
|
26 | 26 | if base in dirs: |
|
27 | 27 | dirs[base] += 1 |
|
28 | 28 | return |
|
29 | 29 | dirs[base] = 1 |
|
30 | 30 | |
|
31 | 31 | def _decdirs(dirs, path): |
|
32 | 32 | for base in _finddirs(path): |
|
33 | 33 | if dirs[base] > 1: |
|
34 | 34 | dirs[base] -= 1 |
|
35 | 35 | return |
|
36 | 36 | del dirs[base] |
|
37 | 37 | |
|
38 | 38 | class dirstate(object): |
|
39 | 39 | |
|
40 | 40 | def __init__(self, opener, ui, root): |
|
41 | 41 | '''Create a new dirstate object. opener is an open()-like callable |
|
42 | 42 | that can be used to open the dirstate file; root is the root of the |
|
43 | 43 | directory tracked by the dirstate.''' |
|
44 | 44 | self._opener = opener |
|
45 | 45 | self._root = root |
|
46 | 46 | self._rootdir = os.path.join(root, '') |
|
47 | 47 | self._dirty = False |
|
48 | 48 | self._dirtypl = False |
|
49 | 49 | self._ui = ui |
|
50 | 50 | |
|
51 | 51 | @propertycache |
|
52 | 52 | def _map(self): |
|
53 | 53 | '''Return the dirstate contents as a map from filename to |
|
54 | 54 | (state, mode, size, time).''' |
|
55 | 55 | self._read() |
|
56 | 56 | return self._map |
|
57 | 57 | |
|
58 | 58 | @propertycache |
|
59 | 59 | def _copymap(self): |
|
60 | 60 | self._read() |
|
61 | 61 | return self._copymap |
|
62 | 62 | |
|
63 | 63 | @propertycache |
|
64 | 64 | def _foldmap(self): |
|
65 | 65 | f = {} |
|
66 | 66 | for name in self._map: |
|
67 | 67 | f[os.path.normcase(name)] = name |
|
68 | 68 | return f |
|
69 | 69 | |
|
70 | 70 | @propertycache |
|
71 | 71 | def _branch(self): |
|
72 | 72 | try: |
|
73 | 73 | return self._opener("branch").read().strip() or "default" |
|
74 | 74 | except IOError: |
|
75 | 75 | return "default" |
|
76 | 76 | |
|
77 | 77 | @propertycache |
|
78 | 78 | def _pl(self): |
|
79 | 79 | try: |
|
80 | 80 | st = self._opener("dirstate").read(40) |
|
81 | 81 | l = len(st) |
|
82 | 82 | if l == 40: |
|
83 | 83 | return st[:20], st[20:40] |
|
84 | 84 | elif l > 0 and l < 40: |
|
85 | 85 | raise util.Abort(_('working directory state appears damaged!')) |
|
86 | 86 | except IOError, err: |
|
87 | 87 | if err.errno != errno.ENOENT: raise |
|
88 | 88 | return [nullid, nullid] |
|
89 | 89 | |
|
90 | 90 | @propertycache |
|
91 | 91 | def _dirs(self): |
|
92 | 92 | dirs = {} |
|
93 | 93 | for f,s in self._map.iteritems(): |
|
94 | 94 | if s[0] != 'r': |
|
95 | 95 | _incdirs(dirs, f) |
|
96 | 96 | return dirs |
|
97 | 97 | |
|
98 | 98 | @propertycache |
|
99 | 99 | def _ignore(self): |
|
100 | 100 | files = [self._join('.hgignore')] |
|
101 | 101 | for name, path in self._ui.configitems("ui"): |
|
102 | 102 | if name == 'ignore' or name.startswith('ignore.'): |
|
103 | 103 | files.append(util.expandpath(path)) |
|
104 | 104 | return ignore.ignore(self._root, files, self._ui.warn) |
|
105 | 105 | |
|
106 | 106 | @propertycache |
|
107 | 107 | def _slash(self): |
|
108 | 108 | return self._ui.configbool('ui', 'slash') and os.sep != '/' |
|
109 | 109 | |
|
110 | 110 | @propertycache |
|
111 | 111 | def _checklink(self): |
|
112 | 112 | return util.checklink(self._root) |
|
113 | 113 | |
|
114 | 114 | @propertycache |
|
115 | 115 | def _checkexec(self): |
|
116 | 116 | return util.checkexec(self._root) |
|
117 | 117 | |
|
118 | 118 | @propertycache |
|
119 | 119 | def _checkcase(self): |
|
120 | 120 | return not util.checkcase(self._join('.hg')) |
|
121 | 121 | |
|
122 | 122 | def _join(self, f): |
|
123 | 123 | # much faster than os.path.join() |
|
124 | 124 | # it's safe because f is always a relative path |
|
125 | 125 | return self._rootdir + f |
|
126 | 126 | |
|
127 | 127 | def flagfunc(self, fallback): |
|
128 | 128 | if self._checklink: |
|
129 | 129 | if self._checkexec: |
|
130 | 130 | def f(x): |
|
131 | 131 | p = self._join(x) |
|
132 | 132 | if os.path.islink(p): |
|
133 | 133 | return 'l' |
|
134 | 134 | if util.is_exec(p): |
|
135 | 135 | return 'x' |
|
136 | 136 | return '' |
|
137 | 137 | return f |
|
138 | 138 | def f(x): |
|
139 | 139 | if os.path.islink(self._join(x)): |
|
140 | 140 | return 'l' |
|
141 | 141 | if 'x' in fallback(x): |
|
142 | 142 | return 'x' |
|
143 | 143 | return '' |
|
144 | 144 | return f |
|
145 | 145 | if self._checkexec: |
|
146 | 146 | def f(x): |
|
147 | 147 | if 'l' in fallback(x): |
|
148 | 148 | return 'l' |
|
149 | 149 | if util.is_exec(self._join(x)): |
|
150 | 150 | return 'x' |
|
151 | 151 | return '' |
|
152 | 152 | return f |
|
153 | 153 | return fallback |
|
154 | 154 | |
|
155 | 155 | def getcwd(self): |
|
156 | 156 | cwd = os.getcwd() |
|
157 | 157 | if cwd == self._root: return '' |
|
158 | 158 | # self._root ends with a path separator if self._root is '/' or 'C:\' |
|
159 | 159 | rootsep = self._root |
|
160 | 160 | if not util.endswithsep(rootsep): |
|
161 | 161 | rootsep += os.sep |
|
162 | 162 | if cwd.startswith(rootsep): |
|
163 | 163 | return cwd[len(rootsep):] |
|
164 | 164 | else: |
|
165 | 165 | # we're outside the repo. return an absolute path. |
|
166 | 166 | return cwd |
|
167 | 167 | |
|
168 | 168 | def pathto(self, f, cwd=None): |
|
169 | 169 | if cwd is None: |
|
170 | 170 | cwd = self.getcwd() |
|
171 | 171 | path = util.pathto(self._root, cwd, f) |
|
172 | 172 | if self._slash: |
|
173 | 173 | return util.normpath(path) |
|
174 | 174 | return path |
|
175 | 175 | |
|
176 | 176 | def __getitem__(self, key): |
|
177 | 177 | '''Return the current state of key (a filename) in the dirstate. |
|
178 | 178 | States are: |
|
179 | 179 | n normal |
|
180 | 180 | m needs merging |
|
181 | 181 | r marked for removal |
|
182 | 182 | a marked for addition |
|
183 | 183 | ? not tracked |
|
184 | 184 | ''' |
|
185 | 185 | return self._map.get(key, ("?",))[0] |
|
186 | 186 | |
|
187 | 187 | def __contains__(self, key): |
|
188 | 188 | return key in self._map |
|
189 | 189 | |
|
190 | 190 | def __iter__(self): |
|
191 | 191 | for x in sorted(self._map): |
|
192 | 192 | yield x |
|
193 | 193 | |
|
194 | 194 | def parents(self): |
|
195 | 195 | return self._pl |
|
196 | 196 | |
|
197 | 197 | def branch(self): |
|
198 | 198 | return self._branch |
|
199 | 199 | |
|
200 | 200 | def setparents(self, p1, p2=nullid): |
|
201 | 201 | self._dirty = self._dirtypl = True |
|
202 | 202 | self._pl = p1, p2 |
|
203 | 203 | |
|
204 | 204 | def setbranch(self, branch): |
|
205 | if branch in ['tip', '.', 'null']: | |
|
206 | raise util.Abort(_('the name \'%s\' is reserved') % branch) | |
|
205 | 207 | self._branch = branch |
|
206 | 208 | self._opener("branch", "w").write(branch + '\n') |
|
207 | 209 | |
|
208 | 210 | def _read(self): |
|
209 | 211 | self._map = {} |
|
210 | 212 | self._copymap = {} |
|
211 | 213 | try: |
|
212 | 214 | st = self._opener("dirstate").read() |
|
213 | 215 | except IOError, err: |
|
214 | 216 | if err.errno != errno.ENOENT: raise |
|
215 | 217 | return |
|
216 | 218 | if not st: |
|
217 | 219 | return |
|
218 | 220 | |
|
219 | 221 | p = parsers.parse_dirstate(self._map, self._copymap, st) |
|
220 | 222 | if not self._dirtypl: |
|
221 | 223 | self._pl = p |
|
222 | 224 | |
|
223 | 225 | def invalidate(self): |
|
224 | 226 | for a in "_map _copymap _foldmap _branch _pl _dirs _ignore".split(): |
|
225 | 227 | if a in self.__dict__: |
|
226 | 228 | delattr(self, a) |
|
227 | 229 | self._dirty = False |
|
228 | 230 | |
|
229 | 231 | def copy(self, source, dest): |
|
230 | 232 | """Mark dest as a copy of source. Unmark dest if source is None. |
|
231 | 233 | """ |
|
232 | 234 | if source == dest: |
|
233 | 235 | return |
|
234 | 236 | self._dirty = True |
|
235 | 237 | if source is not None: |
|
236 | 238 | self._copymap[dest] = source |
|
237 | 239 | elif dest in self._copymap: |
|
238 | 240 | del self._copymap[dest] |
|
239 | 241 | |
|
240 | 242 | def copied(self, file): |
|
241 | 243 | return self._copymap.get(file, None) |
|
242 | 244 | |
|
243 | 245 | def copies(self): |
|
244 | 246 | return self._copymap |
|
245 | 247 | |
|
246 | 248 | def _droppath(self, f): |
|
247 | 249 | if self[f] not in "?r" and "_dirs" in self.__dict__: |
|
248 | 250 | _decdirs(self._dirs, f) |
|
249 | 251 | |
|
250 | 252 | def _addpath(self, f, check=False): |
|
251 | 253 | oldstate = self[f] |
|
252 | 254 | if check or oldstate == "r": |
|
253 | 255 | if '\r' in f or '\n' in f: |
|
254 | 256 | raise util.Abort( |
|
255 | 257 | _("'\\n' and '\\r' disallowed in filenames: %r") % f) |
|
256 | 258 | if f in self._dirs: |
|
257 | 259 | raise util.Abort(_('directory %r already in dirstate') % f) |
|
258 | 260 | # shadows |
|
259 | 261 | for d in _finddirs(f): |
|
260 | 262 | if d in self._dirs: |
|
261 | 263 | break |
|
262 | 264 | if d in self._map and self[d] != 'r': |
|
263 | 265 | raise util.Abort( |
|
264 | 266 | _('file %r in dirstate clashes with %r') % (d, f)) |
|
265 | 267 | if oldstate in "?r" and "_dirs" in self.__dict__: |
|
266 | 268 | _incdirs(self._dirs, f) |
|
267 | 269 | |
|
268 | 270 | def normal(self, f): |
|
269 | 271 | 'mark a file normal and clean' |
|
270 | 272 | self._dirty = True |
|
271 | 273 | self._addpath(f) |
|
272 | 274 | s = os.lstat(self._join(f)) |
|
273 | 275 | self._map[f] = ('n', s.st_mode, s.st_size, int(s.st_mtime)) |
|
274 | 276 | if f in self._copymap: |
|
275 | 277 | del self._copymap[f] |
|
276 | 278 | |
|
277 | 279 | def normallookup(self, f): |
|
278 | 280 | 'mark a file normal, but possibly dirty' |
|
279 | 281 | if self._pl[1] != nullid and f in self._map: |
|
280 | 282 | # if there is a merge going on and the file was either |
|
281 | 283 | # in state 'm' or dirty before being removed, restore that state. |
|
282 | 284 | entry = self._map[f] |
|
283 | 285 | if entry[0] == 'r' and entry[2] in (-1, -2): |
|
284 | 286 | source = self._copymap.get(f) |
|
285 | 287 | if entry[2] == -1: |
|
286 | 288 | self.merge(f) |
|
287 | 289 | elif entry[2] == -2: |
|
288 | 290 | self.normaldirty(f) |
|
289 | 291 | if source: |
|
290 | 292 | self.copy(source, f) |
|
291 | 293 | return |
|
292 | 294 | if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2: |
|
293 | 295 | return |
|
294 | 296 | self._dirty = True |
|
295 | 297 | self._addpath(f) |
|
296 | 298 | self._map[f] = ('n', 0, -1, -1) |
|
297 | 299 | if f in self._copymap: |
|
298 | 300 | del self._copymap[f] |
|
299 | 301 | |
|
300 | 302 | def normaldirty(self, f): |
|
301 | 303 | 'mark a file normal, but dirty' |
|
302 | 304 | self._dirty = True |
|
303 | 305 | self._addpath(f) |
|
304 | 306 | self._map[f] = ('n', 0, -2, -1) |
|
305 | 307 | if f in self._copymap: |
|
306 | 308 | del self._copymap[f] |
|
307 | 309 | |
|
308 | 310 | def add(self, f): |
|
309 | 311 | 'mark a file added' |
|
310 | 312 | self._dirty = True |
|
311 | 313 | self._addpath(f, True) |
|
312 | 314 | self._map[f] = ('a', 0, -1, -1) |
|
313 | 315 | if f in self._copymap: |
|
314 | 316 | del self._copymap[f] |
|
315 | 317 | |
|
316 | 318 | def remove(self, f): |
|
317 | 319 | 'mark a file removed' |
|
318 | 320 | self._dirty = True |
|
319 | 321 | self._droppath(f) |
|
320 | 322 | size = 0 |
|
321 | 323 | if self._pl[1] != nullid and f in self._map: |
|
322 | 324 | entry = self._map[f] |
|
323 | 325 | if entry[0] == 'm': |
|
324 | 326 | size = -1 |
|
325 | 327 | elif entry[0] == 'n' and entry[2] == -2: |
|
326 | 328 | size = -2 |
|
327 | 329 | self._map[f] = ('r', 0, size, 0) |
|
328 | 330 | if size == 0 and f in self._copymap: |
|
329 | 331 | del self._copymap[f] |
|
330 | 332 | |
|
331 | 333 | def merge(self, f): |
|
332 | 334 | 'mark a file merged' |
|
333 | 335 | self._dirty = True |
|
334 | 336 | s = os.lstat(self._join(f)) |
|
335 | 337 | self._addpath(f) |
|
336 | 338 | self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime)) |
|
337 | 339 | if f in self._copymap: |
|
338 | 340 | del self._copymap[f] |
|
339 | 341 | |
|
340 | 342 | def forget(self, f): |
|
341 | 343 | 'forget a file' |
|
342 | 344 | self._dirty = True |
|
343 | 345 | try: |
|
344 | 346 | self._droppath(f) |
|
345 | 347 | del self._map[f] |
|
346 | 348 | except KeyError: |
|
347 | 349 | self._ui.warn(_("not in dirstate: %s\n") % f) |
|
348 | 350 | |
|
349 | 351 | def _normalize(self, path, knownpath): |
|
350 | 352 | norm_path = os.path.normcase(path) |
|
351 | 353 | fold_path = self._foldmap.get(norm_path, None) |
|
352 | 354 | if fold_path is None: |
|
353 | 355 | if knownpath or not os.path.exists(os.path.join(self._root, path)): |
|
354 | 356 | fold_path = path |
|
355 | 357 | else: |
|
356 | 358 | fold_path = self._foldmap.setdefault(norm_path, |
|
357 | 359 | util.fspath(path, self._root)) |
|
358 | 360 | return fold_path |
|
359 | 361 | |
|
360 | 362 | def clear(self): |
|
361 | 363 | self._map = {} |
|
362 | 364 | if "_dirs" in self.__dict__: |
|
363 | 365 | delattr(self, "_dirs"); |
|
364 | 366 | self._copymap = {} |
|
365 | 367 | self._pl = [nullid, nullid] |
|
366 | 368 | self._dirty = True |
|
367 | 369 | |
|
368 | 370 | def rebuild(self, parent, files): |
|
369 | 371 | self.clear() |
|
370 | 372 | for f in files: |
|
371 | 373 | if 'x' in files.flags(f): |
|
372 | 374 | self._map[f] = ('n', 0777, -1, 0) |
|
373 | 375 | else: |
|
374 | 376 | self._map[f] = ('n', 0666, -1, 0) |
|
375 | 377 | self._pl = (parent, nullid) |
|
376 | 378 | self._dirty = True |
|
377 | 379 | |
|
378 | 380 | def write(self): |
|
379 | 381 | if not self._dirty: |
|
380 | 382 | return |
|
381 | 383 | st = self._opener("dirstate", "w", atomictemp=True) |
|
382 | 384 | |
|
383 | 385 | # use the modification time of the newly created temporary file as the |
|
384 | 386 | # filesystem's notion of 'now' |
|
385 | 387 | now = int(util.fstat(st).st_mtime) |
|
386 | 388 | |
|
387 | 389 | cs = cStringIO.StringIO() |
|
388 | 390 | copymap = self._copymap |
|
389 | 391 | pack = struct.pack |
|
390 | 392 | write = cs.write |
|
391 | 393 | write("".join(self._pl)) |
|
392 | 394 | for f, e in self._map.iteritems(): |
|
393 | 395 | if f in copymap: |
|
394 | 396 | f = "%s\0%s" % (f, copymap[f]) |
|
395 | 397 | |
|
396 | 398 | if e[0] == 'n' and e[3] == now: |
|
397 | 399 | # The file was last modified "simultaneously" with the current |
|
398 | 400 | # write to dirstate (i.e. within the same second for file- |
|
399 | 401 | # systems with a granularity of 1 sec). This commonly happens |
|
400 | 402 | # for at least a couple of files on 'update'. |
|
401 | 403 | # The user could change the file without changing its size |
|
402 | 404 | # within the same second. Invalidate the file's stat data in |
|
403 | 405 | # dirstate, forcing future 'status' calls to compare the |
|
404 | 406 | # contents of the file. This prevents mistakenly treating such |
|
405 | 407 | # files as clean. |
|
406 | 408 | e = (e[0], 0, -1, -1) # mark entry as 'unset' |
|
407 | 409 | |
|
408 | 410 | e = pack(_format, e[0], e[1], e[2], e[3], len(f)) |
|
409 | 411 | write(e) |
|
410 | 412 | write(f) |
|
411 | 413 | st.write(cs.getvalue()) |
|
412 | 414 | st.rename() |
|
413 | 415 | self._dirty = self._dirtypl = False |
|
414 | 416 | |
|
415 | 417 | def _dirignore(self, f): |
|
416 | 418 | if f == '.': |
|
417 | 419 | return False |
|
418 | 420 | if self._ignore(f): |
|
419 | 421 | return True |
|
420 | 422 | for p in _finddirs(f): |
|
421 | 423 | if self._ignore(p): |
|
422 | 424 | return True |
|
423 | 425 | return False |
|
424 | 426 | |
|
425 | 427 | def walk(self, match, unknown, ignored): |
|
426 | 428 | ''' |
|
427 | 429 | Walk recursively through the directory tree, finding all files |
|
428 | 430 | matched by match. |
|
429 | 431 | |
|
430 | 432 | Return a dict mapping filename to stat-like object (either |
|
431 | 433 | mercurial.osutil.stat instance or return value of os.stat()). |
|
432 | 434 | ''' |
|
433 | 435 | |
|
434 | 436 | def fwarn(f, msg): |
|
435 | 437 | self._ui.warn('%s: %s\n' % (self.pathto(f), msg)) |
|
436 | 438 | return False |
|
437 | 439 | |
|
438 | 440 | def badtype(mode): |
|
439 | 441 | kind = _('unknown') |
|
440 | 442 | if stat.S_ISCHR(mode): kind = _('character device') |
|
441 | 443 | elif stat.S_ISBLK(mode): kind = _('block device') |
|
442 | 444 | elif stat.S_ISFIFO(mode): kind = _('fifo') |
|
443 | 445 | elif stat.S_ISSOCK(mode): kind = _('socket') |
|
444 | 446 | elif stat.S_ISDIR(mode): kind = _('directory') |
|
445 | 447 | return _('unsupported file type (type is %s)') % kind |
|
446 | 448 | |
|
447 | 449 | ignore = self._ignore |
|
448 | 450 | dirignore = self._dirignore |
|
449 | 451 | if ignored: |
|
450 | 452 | ignore = util.never |
|
451 | 453 | dirignore = util.never |
|
452 | 454 | elif not unknown: |
|
453 | 455 | # if unknown and ignored are False, skip step 2 |
|
454 | 456 | ignore = util.always |
|
455 | 457 | dirignore = util.always |
|
456 | 458 | |
|
457 | 459 | matchfn = match.matchfn |
|
458 | 460 | badfn = match.bad |
|
459 | 461 | dmap = self._map |
|
460 | 462 | normpath = util.normpath |
|
461 | 463 | listdir = osutil.listdir |
|
462 | 464 | lstat = os.lstat |
|
463 | 465 | getkind = stat.S_IFMT |
|
464 | 466 | dirkind = stat.S_IFDIR |
|
465 | 467 | regkind = stat.S_IFREG |
|
466 | 468 | lnkkind = stat.S_IFLNK |
|
467 | 469 | join = self._join |
|
468 | 470 | work = [] |
|
469 | 471 | wadd = work.append |
|
470 | 472 | |
|
471 | 473 | if self._checkcase: |
|
472 | 474 | normalize = self._normalize |
|
473 | 475 | else: |
|
474 | 476 | normalize = lambda x, y: x |
|
475 | 477 | |
|
476 | 478 | exact = skipstep3 = False |
|
477 | 479 | if matchfn == match.exact: # match.exact |
|
478 | 480 | exact = True |
|
479 | 481 | dirignore = util.always # skip step 2 |
|
480 | 482 | elif match.files() and not match.anypats(): # match.match, no patterns |
|
481 | 483 | skipstep3 = True |
|
482 | 484 | |
|
483 | 485 | files = set(match.files()) |
|
484 | 486 | if not files or '.' in files: |
|
485 | 487 | files = [''] |
|
486 | 488 | results = {'.hg': None} |
|
487 | 489 | |
|
488 | 490 | # step 1: find all explicit files |
|
489 | 491 | for ff in sorted(files): |
|
490 | 492 | nf = normalize(normpath(ff), False) |
|
491 | 493 | if nf in results: |
|
492 | 494 | continue |
|
493 | 495 | |
|
494 | 496 | try: |
|
495 | 497 | st = lstat(join(nf)) |
|
496 | 498 | kind = getkind(st.st_mode) |
|
497 | 499 | if kind == dirkind: |
|
498 | 500 | skipstep3 = False |
|
499 | 501 | if nf in dmap: |
|
500 | 502 | #file deleted on disk but still in dirstate |
|
501 | 503 | results[nf] = None |
|
502 | 504 | match.dir(nf) |
|
503 | 505 | if not dirignore(nf): |
|
504 | 506 | wadd(nf) |
|
505 | 507 | elif kind == regkind or kind == lnkkind: |
|
506 | 508 | results[nf] = st |
|
507 | 509 | else: |
|
508 | 510 | badfn(ff, badtype(kind)) |
|
509 | 511 | if nf in dmap: |
|
510 | 512 | results[nf] = None |
|
511 | 513 | except OSError, inst: |
|
512 | 514 | if nf in dmap: # does it exactly match a file? |
|
513 | 515 | results[nf] = None |
|
514 | 516 | else: # does it match a directory? |
|
515 | 517 | prefix = nf + "/" |
|
516 | 518 | for fn in dmap: |
|
517 | 519 | if fn.startswith(prefix): |
|
518 | 520 | match.dir(nf) |
|
519 | 521 | skipstep3 = False |
|
520 | 522 | break |
|
521 | 523 | else: |
|
522 | 524 | badfn(ff, inst.strerror) |
|
523 | 525 | |
|
524 | 526 | # step 2: visit subdirectories |
|
525 | 527 | while work: |
|
526 | 528 | nd = work.pop() |
|
527 | 529 | skip = None |
|
528 | 530 | if nd == '.': |
|
529 | 531 | nd = '' |
|
530 | 532 | else: |
|
531 | 533 | skip = '.hg' |
|
532 | 534 | try: |
|
533 | 535 | entries = listdir(join(nd), stat=True, skip=skip) |
|
534 | 536 | except OSError, inst: |
|
535 | 537 | if inst.errno == errno.EACCES: |
|
536 | 538 | fwarn(nd, inst.strerror) |
|
537 | 539 | continue |
|
538 | 540 | raise |
|
539 | 541 | for f, kind, st in entries: |
|
540 | 542 | nf = normalize(nd and (nd + "/" + f) or f, True) |
|
541 | 543 | if nf not in results: |
|
542 | 544 | if kind == dirkind: |
|
543 | 545 | if not ignore(nf): |
|
544 | 546 | match.dir(nf) |
|
545 | 547 | wadd(nf) |
|
546 | 548 | if nf in dmap and matchfn(nf): |
|
547 | 549 | results[nf] = None |
|
548 | 550 | elif kind == regkind or kind == lnkkind: |
|
549 | 551 | if nf in dmap: |
|
550 | 552 | if matchfn(nf): |
|
551 | 553 | results[nf] = st |
|
552 | 554 | elif matchfn(nf) and not ignore(nf): |
|
553 | 555 | results[nf] = st |
|
554 | 556 | elif nf in dmap and matchfn(nf): |
|
555 | 557 | results[nf] = None |
|
556 | 558 | |
|
557 | 559 | # step 3: report unseen items in the dmap hash |
|
558 | 560 | if not skipstep3 and not exact: |
|
559 | 561 | visit = sorted([f for f in dmap if f not in results and matchfn(f)]) |
|
560 | 562 | for nf, st in zip(visit, util.statfiles([join(i) for i in visit])): |
|
561 | 563 | if not st is None and not getkind(st.st_mode) in (regkind, lnkkind): |
|
562 | 564 | st = None |
|
563 | 565 | results[nf] = st |
|
564 | 566 | |
|
565 | 567 | del results['.hg'] |
|
566 | 568 | return results |
|
567 | 569 | |
|
568 | 570 | def status(self, match, ignored, clean, unknown): |
|
569 | 571 | '''Determine the status of the working copy relative to the |
|
570 | 572 | dirstate and return a tuple of lists (unsure, modified, added, |
|
571 | 573 | removed, deleted, unknown, ignored, clean), where: |
|
572 | 574 | |
|
573 | 575 | unsure: |
|
574 | 576 | files that might have been modified since the dirstate was |
|
575 | 577 | written, but need to be read to be sure (size is the same |
|
576 | 578 | but mtime differs) |
|
577 | 579 | modified: |
|
578 | 580 | files that have definitely been modified since the dirstate |
|
579 | 581 | was written (different size or mode) |
|
580 | 582 | added: |
|
581 | 583 | files that have been explicitly added with hg add |
|
582 | 584 | removed: |
|
583 | 585 | files that have been explicitly removed with hg remove |
|
584 | 586 | deleted: |
|
585 | 587 | files that have been deleted through other means ("missing") |
|
586 | 588 | unknown: |
|
587 | 589 | files not in the dirstate that are not ignored |
|
588 | 590 | ignored: |
|
589 | 591 | files not in the dirstate that are ignored |
|
590 | 592 | (by _dirignore()) |
|
591 | 593 | clean: |
|
592 | 594 | files that have definitely not been modified since the |
|
593 | 595 | dirstate was written |
|
594 | 596 | ''' |
|
595 | 597 | listignored, listclean, listunknown = ignored, clean, unknown |
|
596 | 598 | lookup, modified, added, unknown, ignored = [], [], [], [], [] |
|
597 | 599 | removed, deleted, clean = [], [], [] |
|
598 | 600 | |
|
599 | 601 | dmap = self._map |
|
600 | 602 | ladd = lookup.append # aka "unsure" |
|
601 | 603 | madd = modified.append |
|
602 | 604 | aadd = added.append |
|
603 | 605 | uadd = unknown.append |
|
604 | 606 | iadd = ignored.append |
|
605 | 607 | radd = removed.append |
|
606 | 608 | dadd = deleted.append |
|
607 | 609 | cadd = clean.append |
|
608 | 610 | |
|
609 | 611 | for fn, st in self.walk(match, listunknown, listignored).iteritems(): |
|
610 | 612 | if fn not in dmap: |
|
611 | 613 | if (listignored or match.exact(fn)) and self._dirignore(fn): |
|
612 | 614 | if listignored: |
|
613 | 615 | iadd(fn) |
|
614 | 616 | elif listunknown: |
|
615 | 617 | uadd(fn) |
|
616 | 618 | continue |
|
617 | 619 | |
|
618 | 620 | state, mode, size, time = dmap[fn] |
|
619 | 621 | |
|
620 | 622 | if not st and state in "nma": |
|
621 | 623 | dadd(fn) |
|
622 | 624 | elif state == 'n': |
|
623 | 625 | if (size >= 0 and |
|
624 | 626 | (size != st.st_size |
|
625 | 627 | or ((mode ^ st.st_mode) & 0100 and self._checkexec)) |
|
626 | 628 | or size == -2 |
|
627 | 629 | or fn in self._copymap): |
|
628 | 630 | madd(fn) |
|
629 | 631 | elif time != int(st.st_mtime): |
|
630 | 632 | ladd(fn) |
|
631 | 633 | elif listclean: |
|
632 | 634 | cadd(fn) |
|
633 | 635 | elif state == 'm': |
|
634 | 636 | madd(fn) |
|
635 | 637 | elif state == 'a': |
|
636 | 638 | aadd(fn) |
|
637 | 639 | elif state == 'r': |
|
638 | 640 | radd(fn) |
|
639 | 641 | |
|
640 | 642 | return (lookup, modified, added, removed, deleted, unknown, ignored, |
|
641 | 643 | clean) |
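Reviewer note: this is the working-directory half of the same change. setbranch() now refuses 'tip', '.' and 'null' before the branch file is written, so 'hg branch <name>' aborts immediately instead of leaving a name that would only fail at commit time. Below is a minimal sketch of the guard, with a plain callable standing in for self._opener("branch", "w").write and a local Abort class standing in for util.Abort; both stand-ins are assumptions of the sketch, not the real API.

    class Abort(Exception):
        pass

    def setbranch(writebranch, branch):
        # refuse names that double as revision identifiers
        if branch in ('tip', '.', 'null'):
            raise Abort("the name '%s' is reserved" % branch)
        writebranch(branch + '\n')

    written = []
    setbranch(written.append, 'b')
    assert written == ['b\n']
    try:
        setbranch(written.append, 'null')
    except Abort:
        assert written == ['b\n']              # nothing written for a reserved name

The test script and expected output that follow exercise exactly these three names ('tip', 'null', '.') and check for the corresponding abort messages.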
@@ -1,90 +1,94 @@
|
1 | 1 | #!/bin/sh |
|
2 | 2 | |
|
3 | 3 | hg init a |
|
4 | 4 | cd a |
|
5 | 5 | echo 'root' >root |
|
6 | 6 | hg add root |
|
7 | 7 | hg commit -d '0 0' -m "Adding root node" |
|
8 | 8 | |
|
9 | 9 | echo 'a' >a |
|
10 | 10 | hg add a |
|
11 | 11 | hg branch a |
|
12 | 12 | hg commit -d '1 0' -m "Adding a branch" |
|
13 | 13 | |
|
14 | 14 | hg branch q |
|
15 | 15 | echo 'aa' >a |
|
16 | 16 | hg branch -C |
|
17 | 17 | hg commit -d '2 0' -m "Adding to a branch" |
|
18 | 18 | |
|
19 | 19 | hg update -C 0 |
|
20 | 20 | echo 'b' >b |
|
21 | 21 | hg add b |
|
22 | 22 | hg branch b |
|
23 | 23 | hg commit -d '2 0' -m "Adding b branch" |
|
24 | 24 | |
|
25 | 25 | echo 'bh1' >bh1 |
|
26 | 26 | hg add bh1 |
|
27 | 27 | hg commit -d '3 0' -m "Adding b branch head 1" |
|
28 | 28 | |
|
29 | 29 | hg update -C 2 |
|
30 | 30 | echo 'bh2' >bh2 |
|
31 | 31 | hg add bh2 |
|
32 | 32 | hg commit -d '4 0' -m "Adding b branch head 2" |
|
33 | 33 | |
|
34 | 34 | echo 'c' >c |
|
35 | 35 | hg add c |
|
36 | 36 | hg branch c |
|
37 | 37 | hg commit -d '5 0' -m "Adding c branch" |
|
38 | 38 | |
|
39 | hg branch tip | |
|
40 | hg branch null | |
|
41 | hg branch . | |
|
42 | ||
|
39 | 43 | echo 'd' >d |
|
40 | 44 | hg add d |
|
41 | 45 | hg branch 'a branch name much longer than the default justification used by branches' |
|
42 | 46 | hg commit -d '6 0' -m "Adding d branch" |
|
43 | 47 | |
|
44 | 48 | hg branches |
|
45 | 49 | echo '-------' |
|
46 | 50 | hg branches -a |
|
47 | 51 | |
|
48 | 52 | echo "--- Branch a" |
|
49 | 53 | hg log -b a |
|
50 | 54 | |
|
51 | 55 | echo "---- Branch b" |
|
52 | 56 | hg log -b b |
|
53 | 57 | |
|
54 | 58 | echo "---- going to test branch closing" |
|
55 | 59 | hg branches |
|
56 | 60 | hg up -C b |
|
57 | 61 | echo 'xxx1' >> b |
|
58 | 62 | hg commit -d '7 0' -m 'adding cset to branch b' |
|
59 | 63 | hg up -C aee39cd168d0 |
|
60 | 64 | echo 'xxx2' >> b |
|
61 | 65 | hg commit -d '8 0' -m 'adding head to branch b' |
|
62 | 66 | echo 'xxx3' >> b |
|
63 | 67 | hg commit -d '9 0' -m 'adding another cset to branch b' |
|
64 | 68 | hg branches |
|
65 | 69 | hg heads --closed |
|
66 | 70 | hg heads |
|
67 | 71 | hg commit -d '9 0' --close-branch -m 'prune bad branch' |
|
68 | 72 | hg branches -a |
|
69 | 73 | hg up -C b |
|
70 | 74 | hg commit -d '9 0' --close-branch -m 'close this part branch too' |
|
71 | 75 | echo '--- b branch should be inactive' |
|
72 | 76 | hg branches |
|
73 | 77 | hg branches -c |
|
74 | 78 | hg branches -a |
|
75 | 79 | hg heads b |
|
76 | 80 | hg heads --closed b |
|
77 | 81 | echo 'xxx4' >> b |
|
78 | 82 | hg commit -d '9 0' -m 'reopen branch with a change' |
|
79 | 83 | echo '--- branch b is back in action' |
|
80 | 84 | hg branches -a |
|
81 | 85 | echo '---- test heads listings' |
|
82 | 86 | hg heads |
|
83 | 87 | echo '% branch default' |
|
84 | 88 | hg heads default |
|
85 | 89 | echo '% branch a' |
|
86 | 90 | hg heads a |
|
87 | 91 | hg heads --active a |
|
88 | 92 | echo '% branch b' |
|
89 | 93 | hg heads b |
|
90 | 94 | hg heads --closed b |
@@ -1,173 +1,176 @@
|
1 | 1 | marked working directory as branch a |
|
2 | 2 | marked working directory as branch q |
|
3 | 3 | reset working directory to branch a |
|
4 | 4 | 0 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
5 | 5 | marked working directory as branch b |
|
6 | 6 | created new head |
|
7 | 7 | 1 files updated, 0 files merged, 2 files removed, 0 files unresolved |
|
8 | 8 | marked working directory as branch c |
|
9 | abort: the name 'tip' is reserved | |
|
10 | abort: the name 'null' is reserved | |
|
11 | abort: the name '.' is reserved | |
|
9 | 12 | marked working directory as branch a branch name much longer than the default justification used by branches |
|
10 | 13 | a branch name much longer than the default justification used by branches 7:10ff5895aa57 |
|
11 | 14 | b 4:aee39cd168d0 |
|
12 | 15 | c 6:589736a22561 (inactive) |
|
13 | 16 | a 5:d8cbc61dbaa6 (inactive) |
|
14 | 17 | default 0:19709c5a4e75 (inactive) |
|
15 | 18 | ------- |
|
16 | 19 | a branch name much longer than the default justification used by branches 7:10ff5895aa57 |
|
17 | 20 | b 4:aee39cd168d0 |
|
18 | 21 | --- Branch a |
|
19 | 22 | changeset: 5:d8cbc61dbaa6 |
|
20 | 23 | branch: a |
|
21 | 24 | parent: 2:881fe2b92ad0 |
|
22 | 25 | user: test |
|
23 | 26 | date: Thu Jan 01 00:00:04 1970 +0000 |
|
24 | 27 | summary: Adding b branch head 2 |
|
25 | 28 | |
|
26 | 29 | changeset: 2:881fe2b92ad0 |
|
27 | 30 | branch: a |
|
28 | 31 | user: test |
|
29 | 32 | date: Thu Jan 01 00:00:02 1970 +0000 |
|
30 | 33 | summary: Adding to a branch |
|
31 | 34 | |
|
32 | 35 | changeset: 1:dd6b440dd85a |
|
33 | 36 | branch: a |
|
34 | 37 | user: test |
|
35 | 38 | date: Thu Jan 01 00:00:01 1970 +0000 |
|
36 | 39 | summary: Adding a branch |
|
37 | 40 | |
|
38 | 41 | ---- Branch b |
|
39 | 42 | changeset: 4:aee39cd168d0 |
|
40 | 43 | branch: b |
|
41 | 44 | user: test |
|
42 | 45 | date: Thu Jan 01 00:00:03 1970 +0000 |
|
43 | 46 | summary: Adding b branch head 1 |
|
44 | 47 | |
|
45 | 48 | changeset: 3:ac22033332d1 |
|
46 | 49 | branch: b |
|
47 | 50 | parent: 0:19709c5a4e75 |
|
48 | 51 | user: test |
|
49 | 52 | date: Thu Jan 01 00:00:02 1970 +0000 |
|
50 | 53 | summary: Adding b branch |
|
51 | 54 | |
|
52 | 55 | ---- going to test branch closing |
|
53 | 56 | a branch name much longer than the default justification used by branches 7:10ff5895aa57 |
|
54 | 57 | b 4:aee39cd168d0 |
|
55 | 58 | c 6:589736a22561 (inactive) |
|
56 | 59 | a 5:d8cbc61dbaa6 (inactive) |
|
57 | 60 | default 0:19709c5a4e75 (inactive) |
|
58 | 61 | 2 files updated, 0 files merged, 4 files removed, 0 files unresolved |
|
59 | 62 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
60 | 63 | created new head |
|
61 | 64 | b 10:bfbe841b666e |
|
62 | 65 | a branch name much longer than the default justification used by branches 7:10ff5895aa57 |
|
63 | 66 | c 6:589736a22561 (inactive) |
|
64 | 67 | a 5:d8cbc61dbaa6 (inactive) |
|
65 | 68 | default 0:19709c5a4e75 (inactive) |
|
66 | 69 | abort: you must specify a branch to use --closed |
|
67 | 70 | changeset: 10:bfbe841b666e |
|
68 | 71 | branch: b |
|
69 | 72 | tag: tip |
|
70 | 73 | user: test |
|
71 | 74 | date: Thu Jan 01 00:00:09 1970 +0000 |
|
72 | 75 | summary: adding another cset to branch b |
|
73 | 76 | |
|
74 | 77 | changeset: 8:eebb944467c9 |
|
75 | 78 | branch: b |
|
76 | 79 | parent: 4:aee39cd168d0 |
|
77 | 80 | user: test |
|
78 | 81 | date: Thu Jan 01 00:00:07 1970 +0000 |
|
79 | 82 | summary: adding cset to branch b |
|
80 | 83 | |
|
81 | 84 | changeset: 7:10ff5895aa57 |
|
82 | 85 | branch: a branch name much longer than the default justification used by branches |
|
83 | 86 | user: test |
|
84 | 87 | date: Thu Jan 01 00:00:06 1970 +0000 |
|
85 | 88 | summary: Adding d branch |
|
86 | 89 | |
|
87 | 90 | b 8:eebb944467c9 |
|
88 | 91 | a branch name much longer than the default justification used by branches 7:10ff5895aa57 |
|
89 | 92 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
90 | 93 | --- b branch should be inactive |
|
91 | 94 | a branch name much longer than the default justification used by branches 7:10ff5895aa57 |
|
92 | 95 | c 6:589736a22561 (inactive) |
|
93 | 96 | a 5:d8cbc61dbaa6 (inactive) |
|
94 | 97 | default 0:19709c5a4e75 (inactive) |
|
95 | 98 | a branch name much longer than the default justification used by branches 7:10ff5895aa57 |
|
96 | 99 | b 12:2da6583810df (closed) |
|
97 | 100 | c 6:589736a22561 (inactive) |
|
98 | 101 | a 5:d8cbc61dbaa6 (inactive) |
|
99 | 102 | default 0:19709c5a4e75 (inactive) |
|
100 | 103 | a branch name much longer than the default justification used by branches 7:10ff5895aa57 |
|
101 | 104 | no open branch heads on branch b |
|
102 | 105 | changeset: 12:2da6583810df |
|
103 | 106 | branch: b |
|
104 | 107 | tag: tip |
|
105 | 108 | parent: 8:eebb944467c9 |
|
106 | 109 | user: test |
|
107 | 110 | date: Thu Jan 01 00:00:09 1970 +0000 |
|
108 | 111 | summary: close this part branch too |
|
109 | 112 | |
|
110 | 113 | changeset: 11:c84627f3c15d |
|
111 | 114 | branch: b |
|
112 | 115 | user: test |
|
113 | 116 | date: Thu Jan 01 00:00:09 1970 +0000 |
|
114 | 117 | summary: prune bad branch |
|
115 | 118 | |
|
116 | 119 | --- branch b is back in action |
|
117 | 120 | b 13:6ac12926b8c3 |
|
118 | 121 | a branch name much longer than the default justification used by branches 7:10ff5895aa57 |
|
119 | 122 | ---- test heads listings |
|
120 | 123 | changeset: 13:6ac12926b8c3 |
|
121 | 124 | branch: b |
|
122 | 125 | tag: tip |
|
123 | 126 | user: test |
|
124 | 127 | date: Thu Jan 01 00:00:09 1970 +0000 |
|
125 | 128 | summary: reopen branch with a change |
|
126 | 129 | |
|
127 | 130 | changeset: 11:c84627f3c15d |
|
128 | 131 | branch: b |
|
129 | 132 | user: test |
|
130 | 133 | date: Thu Jan 01 00:00:09 1970 +0000 |
|
131 | 134 | summary: prune bad branch |
|
132 | 135 | |
|
133 | 136 | changeset: 7:10ff5895aa57 |
|
134 | 137 | branch: a branch name much longer than the default justification used by branches |
|
135 | 138 | user: test |
|
136 | 139 | date: Thu Jan 01 00:00:06 1970 +0000 |
|
137 | 140 | summary: Adding d branch |
|
138 | 141 | |
|
139 | 142 | % branch default |
|
140 | 143 | changeset: 0:19709c5a4e75 |
|
141 | 144 | user: test |
|
142 | 145 | date: Thu Jan 01 00:00:00 1970 +0000 |
|
143 | 146 | summary: Adding root node |
|
144 | 147 | |
|
145 | 148 | % branch a |
|
146 | 149 | changeset: 5:d8cbc61dbaa6 |
|
147 | 150 | branch: a |
|
148 | 151 | parent: 2:881fe2b92ad0 |
|
149 | 152 | user: test |
|
150 | 153 | date: Thu Jan 01 00:00:04 1970 +0000 |
|
151 | 154 | summary: Adding b branch head 2 |
|
152 | 155 | |
|
153 | 156 | % branch b |
|
154 | 157 | changeset: 13:6ac12926b8c3 |
|
155 | 158 | branch: b |
|
156 | 159 | tag: tip |
|
157 | 160 | user: test |
|
158 | 161 | date: Thu Jan 01 00:00:09 1970 +0000 |
|
159 | 162 | summary: reopen branch with a change |
|
160 | 163 | |
|
161 | 164 | changeset: 13:6ac12926b8c3 |
|
162 | 165 | branch: b |
|
163 | 166 | tag: tip |
|
164 | 167 | user: test |
|
165 | 168 | date: Thu Jan 01 00:00:09 1970 +0000 |
|
166 | 169 | summary: reopen branch with a change |
|
167 | 170 | |
|
168 | 171 | changeset: 11:c84627f3c15d |
|
169 | 172 | branch: b |
|
170 | 173 | user: test |
|
171 | 174 | date: Thu Jan 01 00:00:09 1970 +0000 |
|
172 | 175 | summary: prune bad branch |
|
173 | 176 |