@@ -1,641 +1,641 @@ mercurial/dirstate.py
# dirstate.py - working directory tracking for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.

from node import nullid
from i18n import _
import util, ignore, osutil, parsers
import struct, os, stat, errno
import cStringIO

_unknown = ('?', 0, 0, 0)
_format = ">cllll"
propertycache = util.propertycache

def _finddirs(path):
    pos = path.rfind('/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind('/', 0, pos)

def _incdirs(dirs, path):
    for base in _finddirs(path):
        if base in dirs:
            dirs[base] += 1
            return
        dirs[base] = 1

def _decdirs(dirs, path):
    for base in _finddirs(path):
        if dirs[base] > 1:
            dirs[base] -= 1
            return
        del dirs[base]

class dirstate(object):

    def __init__(self, opener, ui, root):
        '''Create a new dirstate object. opener is an open()-like callable
        that can be used to open the dirstate file; root is the root of the
        directory tracked by the dirstate.'''
        self._opener = opener
        self._root = root
        self._rootdir = os.path.join(root, '')
        self._dirty = False
        self._dirtypl = False
        self._ui = ui

    @propertycache
    def _map(self):
        '''Return the dirstate contents as a map from filename to
        (state, mode, size, time).'''
        self._read()
        return self._map

    @propertycache
    def _copymap(self):
        self._read()
        return self._copymap

    @propertycache
    def _foldmap(self):
        f = {}
        for name in self._map:
            f[os.path.normcase(name)] = name
        return f

    @propertycache
    def _branch(self):
        try:
            return self._opener("branch").read().strip() or "default"
        except IOError:
            return "default"

    @propertycache
    def _pl(self):
        try:
            st = self._opener("dirstate").read(40)
            l = len(st)
            if l == 40:
                return st[:20], st[20:40]
            elif l > 0 and l < 40:
                raise util.Abort(_('working directory state appears damaged!'))
        except IOError, err:
            if err.errno != errno.ENOENT: raise
        return [nullid, nullid]

    @propertycache
    def _dirs(self):
        dirs = {}
        for f,s in self._map.iteritems():
            if s[0] != 'r':
                _incdirs(dirs, f)
        return dirs

    @propertycache
    def _ignore(self):
        files = [self._join('.hgignore')]
        for name, path in self._ui.configitems("ui"):
            if name == 'ignore' or name.startswith('ignore.'):
                files.append(util.expandpath(path))
        return ignore.ignore(self._root, files, self._ui.warn)

    @propertycache
    def _slash(self):
        return self._ui.configbool('ui', 'slash') and os.sep != '/'

    @propertycache
    def _checklink(self):
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        return util.checkexec(self._root)

    @propertycache
    def _checkcase(self):
        return not util.checkcase(self._join('.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f

    def flagfunc(self, fallback):
        if self._checklink:
            if self._checkexec:
                def f(x):
                    p = self._join(x)
                    if os.path.islink(p):
                        return 'l'
                    if util.is_exec(p):
                        return 'x'
                    return ''
                return f
            def f(x):
                if os.path.islink(self._join(x)):
                    return 'l'
                if 'x' in fallback(x):
                    return 'x'
                return ''
            return f
        if self._checkexec:
            def f(x):
                if 'l' in fallback(x):
                    return 'l'
                if util.is_exec(self._join(x)):
                    return 'x'
                return ''
            return f
        return fallback

    def getcwd(self):
        cwd = os.getcwd()
        if cwd == self._root: return ''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += os.sep
        if cwd.startswith(rootsep):
            return cwd[len(rootsep):]
        else:
            # we're outside the repo. return an absolute path.
            return cwd

    def pathto(self, f, cwd=None):
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            return util.normpath(path)
        return path

    def __getitem__(self, key):
        '''Return the current state of key (a filename) in the dirstate.
        States are:
        n  normal
        m  needs merging
        r  marked for removal
        a  marked for addition
        ?  not tracked
        '''
        return self._map.get(key, ("?",))[0]

    def __contains__(self, key):
        return key in self._map

    def __iter__(self):
        for x in sorted(self._map):
            yield x

    def parents(self):
        return self._pl

    def branch(self):
        return self._branch

    def setparents(self, p1, p2=nullid):
        self._dirty = self._dirtypl = True
        self._pl = p1, p2

    def setbranch(self, branch):
        self._branch = branch
        self._opener("branch", "w").write(branch + '\n')

    def _read(self):
        self._map = {}
        self._copymap = {}
        try:
            st = self._opener("dirstate").read()
        except IOError, err:
            if err.errno != errno.ENOENT: raise
            return
        if not st:
            return

        p = parsers.parse_dirstate(self._map, self._copymap, st)
        if not self._dirtypl:
            self._pl = p

    def invalidate(self):
        for a in "_map _copymap _foldmap _branch _pl _dirs _ignore".split():
            if a in self.__dict__:
                delattr(self, a)
        self._dirty = False

    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None.
        """
        if source == dest:
            return
        self._dirty = True
        if source is not None:
            self._copymap[dest] = source
        elif dest in self._copymap:
            del self._copymap[dest]

    def copied(self, file):
        return self._copymap.get(file, None)

    def copies(self):
        return self._copymap

    def _droppath(self, f):
        if self[f] not in "?r" and "_dirs" in self.__dict__:
            _decdirs(self._dirs, f)

    def _addpath(self, f, check=False):
        oldstate = self[f]
        if check or oldstate == "r":
            if '\r' in f or '\n' in f:
                raise util.Abort(
                    _("'\\n' and '\\r' disallowed in filenames: %r") % f)
            if f in self._dirs:
                raise util.Abort(_('directory %r already in dirstate') % f)
            # shadows
            for d in _finddirs(f):
                if d in self._dirs:
                    break
                if d in self._map and self[d] != 'r':
                    raise util.Abort(
                        _('file %r in dirstate clashes with %r') % (d, f))
        if oldstate in "?r" and "_dirs" in self.__dict__:
            _incdirs(self._dirs, f)

    def normal(self, f):
        'mark a file normal and clean'
        self._dirty = True
        self._addpath(f)
        s = os.lstat(self._join(f))
        self._map[f] = ('n', s.st_mode, s.st_size, int(s.st_mtime))
        if f in self._copymap:
            del self._copymap[f]

    def normallookup(self, f):
        'mark a file normal, but possibly dirty'
        if self._pl[1] != nullid and f in self._map:
            # if there is a merge going on and the file was either
            # in state 'm' or dirty before being removed, restore that state.
            entry = self._map[f]
            if entry[0] == 'r' and entry[2] in (-1, -2):
                source = self._copymap.get(f)
                if entry[2] == -1:
                    self.merge(f)
                elif entry[2] == -2:
                    self.normaldirty(f)
                if source:
                    self.copy(source, f)
                return
            if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
                return
        self._dirty = True
        self._addpath(f)
        self._map[f] = ('n', 0, -1, -1)
        if f in self._copymap:
            del self._copymap[f]

    def normaldirty(self, f):
        'mark a file normal, but dirty'
        self._dirty = True
        self._addpath(f)
        self._map[f] = ('n', 0, -2, -1)
        if f in self._copymap:
            del self._copymap[f]

    def add(self, f):
        'mark a file added'
        self._dirty = True
        self._addpath(f, True)
        self._map[f] = ('a', 0, -1, -1)
        if f in self._copymap:
            del self._copymap[f]

    def remove(self, f):
        'mark a file removed'
        self._dirty = True
        self._droppath(f)
        size = 0
        if self._pl[1] != nullid and f in self._map:
            entry = self._map[f]
            if entry[0] == 'm':
                size = -1
            elif entry[0] == 'n' and entry[2] == -2:
                size = -2
        self._map[f] = ('r', 0, size, 0)
        if size == 0 and f in self._copymap:
            del self._copymap[f]

    def merge(self, f):
        'mark a file merged'
        self._dirty = True
        s = os.lstat(self._join(f))
        self._addpath(f)
        self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
        if f in self._copymap:
            del self._copymap[f]

    def forget(self, f):
        'forget a file'
        self._dirty = True
        try:
            self._droppath(f)
            del self._map[f]
        except KeyError:
            self._ui.warn(_("not in dirstate: %s\n") % f)

    def _normalize(self, path, knownpath):
        norm_path = os.path.normcase(path)
        fold_path = self._foldmap.get(norm_path, None)
        if fold_path is None:
            if knownpath or not os.path.exists(os.path.join(self._root, path)):
                fold_path = path
            else:
                fold_path = self._foldmap.setdefault(norm_path,
                                    util.fspath(path, self._root))
        return fold_path

    def clear(self):
        self._map = {}
        if "_dirs" in self.__dict__:
            delattr(self, "_dirs")
        self._copymap = {}
        self._pl = [nullid, nullid]
        self._dirty = True

    def rebuild(self, parent, files):
        self.clear()
        for f in files:
            if 'x' in files.flags(f):
                self._map[f] = ('n', 0777, -1, 0)
            else:
                self._map[f] = ('n', 0666, -1, 0)
        self._pl = (parent, nullid)
        self._dirty = True

    def write(self):
        if not self._dirty:
            return
        st = self._opener("dirstate", "w", atomictemp=True)

        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = int(util.fstat(st).st_mtime)

        cs = cStringIO.StringIO()
        copymap = self._copymap
        pack = struct.pack
        write = cs.write
        write("".join(self._pl))
        for f, e in self._map.iteritems():
            if f in copymap:
                f = "%s\0%s" % (f, copymap[f])

            if e[0] == 'n' and e[3] == now:
                # The file was last modified "simultaneously" with the current
                # write to dirstate (i.e. within the same second for file-
                # systems with a granularity of 1 sec). This commonly happens
                # for at least a couple of files on 'update'.
                # The user could change the file without changing its size
                # within the same second. Invalidate the file's stat data in
                # dirstate, forcing future 'status' calls to compare the
                # contents of the file. This prevents mistakenly treating such
                # files as clean.
                e = (e[0], 0, -1, -1)   # mark entry as 'unset'

            e = pack(_format, e[0], e[1], e[2], e[3], len(f))
            write(e)
            write(f)
        st.write(cs.getvalue())
        st.rename()
        self._dirty = self._dirtypl = False

    def _dirignore(self, f):
        if f == '.':
            return False
        if self._ignore(f):
            return True
        for p in _finddirs(f):
            if self._ignore(p):
                return True
        return False

    def walk(self, match, unknown, ignored):
        '''
        Walk recursively through the directory tree, finding all files
        matched by match.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).
        '''

        def fwarn(f, msg):
            self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
            return False

        def badtype(mode):
            kind = _('unknown')
            if stat.S_ISCHR(mode): kind = _('character device')
            elif stat.S_ISBLK(mode): kind = _('block device')
            elif stat.S_ISFIFO(mode): kind = _('fifo')
            elif stat.S_ISSOCK(mode): kind = _('socket')
            elif stat.S_ISDIR(mode): kind = _('directory')
            return _('unsupported file type (type is %s)') % kind

        ignore = self._ignore
        dirignore = self._dirignore
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif not unknown:
            # if unknown and ignored are False, skip step 2
            ignore = util.always
            dirignore = util.always

        matchfn = match.matchfn
        badfn = match.bad
        dmap = self._map
        normpath = util.normpath
        listdir = osutil.listdir
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        work = []
        wadd = work.append

        if self._checkcase:
            normalize = self._normalize
        else:
            normalize = lambda x, y: x

        exact = skipstep3 = False
        if matchfn == match.exact: # match.exact
            exact = True
            dirignore = util.always # skip step 2
        elif match.files() and not match.anypats(): # match.match, no patterns
            skipstep3 = True

        files = set(match.files())
        if not files or '.' in files:
            files = ['']
        results = {'.hg': None}

        # step 1: find all explicit files
        for ff in sorted(files):
            nf = normalize(normpath(ff), False)
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    skipstep3 = False
                    if nf in dmap:
                        #file deleted on disk but still in dirstate
                        results[nf] = None
                    match.dir(nf)
                    if not dirignore(nf):
                        wadd(nf)
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError, inst:
                if nf in dmap: # does it exactly match a file?
                    results[nf] = None
                else: # does it match a directory?
                    prefix = nf + "/"
                    for fn in dmap:
                        if fn.startswith(prefix):
                            match.dir(nf)
                            skipstep3 = False
                            break
                    else:
                        badfn(ff, inst.strerror)

        # step 2: visit subdirectories
        while work:
            nd = work.pop()
            skip = None
            if nd == '.':
                nd = ''
            else:
                skip = '.hg'
            try:
                entries = listdir(join(nd), stat=True, skip=skip)
            except OSError, inst:
                if inst.errno == errno.EACCES:
                    fwarn(nd, inst.strerror)
                    continue
                raise
            for f, kind, st in entries:
                nf = normalize(nd and (nd + "/" + f) or f, True)
                if nf not in results:
                    if kind == dirkind:
                        if not ignore(nf):
                            match.dir(nf)
                            wadd(nf)
                        if nf in dmap and matchfn(nf):
                            results[nf] = None
                    elif kind == regkind or kind == lnkkind:
                        if nf in dmap:
                            if matchfn(nf):
                                results[nf] = st
                        elif matchfn(nf) and not ignore(nf):
                            results[nf] = st
                    elif nf in dmap and matchfn(nf):
                        results[nf] = None

        # step 3: report unseen items in the dmap hash
        if not skipstep3 and not exact:
            visit = sorted([f for f in dmap if f not in results and matchfn(f)])
            for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
                if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
                    st = None
                results[nf] = st

        del results['.hg']
        return results

    def status(self, match, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a tuple of lists (unsure, modified, added,
        removed, deleted, unknown, ignored, clean), where:

        unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
        modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
        added:
            files that have been explicitly added with hg add
        removed:
            files that have been explicitly removed with hg remove
        deleted:
            files that have been deleted through other means ("missing")
        unknown:
            files not in the dirstate that are not ignored
        ignored:
            files not in the dirstate that are ignored
            (by _dirignore())
        clean:
            files that have definitely not been modified since the
            dirstate was written
        '''
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        ladd = lookup.append            # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append
        iadd = ignored.append
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append

        for fn, st in self.walk(match, listunknown, listignored).iteritems():
            if fn not in dmap:
                if (listignored or match.exact(fn)) and self._dirignore(fn):
                    if listignored:
                        iadd(fn)
                elif listunknown:
                    uadd(fn)
                continue

            state, mode, size, time = dmap[fn]

            if not st and state in "nma":
                dadd(fn)
            elif state == 'n':
                if (size >= 0 and
                    (size != st.st_size
                     or ((mode ^ st.st_mode) & 0100 and self._checkexec))
                    or size == -2
                    or fn in self._copymap):
                    madd(fn)
                elif time != int(st.st_mtime):
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == 'm':
                madd(fn)
            elif state == 'a':
                aadd(fn)
            elif state == 'r':
                radd(fn)

        return (lookup, modified, added, removed, deleted, unknown, ignored,
                clean)
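
A note on the record layout used by write() above: each entry is packed with
_format = ">cllll" (state character, mode, size, mtime, filename length) and
followed by the filename, with any copy source appended after a NUL byte. The
following is a minimal standalone sketch of that layout in Python 2, where
pack_entry is a hypothetical helper mirroring write()'s inner loop, not part
of the diff:

    import struct

    _format = ">cllll"  # big-endian: state char, mode, size, mtime, name length

    def pack_entry(state, mode, size, mtime, fname, copysource=None):
        # as in dirstate.write(): a copied file is stored as "dest\0source"
        if copysource:
            fname = "%s\0%s" % (fname, copysource)
        return struct.pack(_format, state, mode, size, mtime, len(fname)) + fname

    # a clean tracked file ('n' = normal), 12 bytes, some mtime:
    record = pack_entry('n', 0644, 12, 1234567890, 'README')
    print len(record)  # 17-byte fixed header + 6-byte name = 23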

@@ -1,92 +1,92 @@ mercurial/help.py
# help.py - help data for mercurial
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.

from i18n import gettext, _
import sys, os
import extensions


def moduledoc(file):
    '''return the top-level python documentation for the given file

    Loosely inspired by pydoc.source_synopsis(), but rewritten to handle \'''
    as well as """ and to return the whole text instead of just the synopsis'''
    result = []

    line = file.readline()
    while line[:1] == '#' or not line.strip():
        line = file.readline()
        if not line: break

    start = line[:3]
    if start == '"""' or start == "'''":
        line = line[3:]
        while line:
            if line.rstrip().endswith(start):
                line = line.split(start)[0]
                if line:
                    result.append(line)
                break
            elif not line:
                return None # unmatched delimiter
            result.append(line)
            line = file.readline()
    else:
        return None

    return ''.join(result)

def listexts(header, exts, maxlength):
    '''return a text listing of the given extensions'''
    if not exts:
        return ''
    result = '\n%s\n\n' % header
    for name, desc in sorted(exts.iteritems()):
        result += ' %-*s %s\n' % (maxlength + 2, ':%s:' % name, desc)
    return result

def extshelp():
    doc = loaddoc('extensions')()

    exts, maxlength = extensions.enabled()
    doc += listexts(_('enabled extensions:'), exts, maxlength)

    exts, maxlength = extensions.disabled()
    doc += listexts(_('disabled extensions:'), exts, maxlength)

    return doc

def loaddoc(topic):
    """Return a delayed loader for help/topic.txt."""

    def loader():
        if hasattr(sys, 'frozen'):
            module = sys.executable
        else:
            module = __file__
        base = os.path.dirname(module)

        for dir in ('.', '..'):
            docdir = os.path.join(base, dir, 'help')
            if os.path.isdir(docdir):
                break

        path = os.path.join(docdir, topic + ".txt")
        return gettext(open(path).read())
    return loader

helptable = (
    (["dates"], _("Date Formats"), loaddoc('dates')),
    (["patterns"], _("File Name Patterns"), loaddoc('patterns')),
    (['environment', 'env'], _('Environment Variables'), loaddoc('environment')),
    (['revs', 'revisions'], _('Specifying Single Revisions'), loaddoc('revisions')),
    (['mrevs', 'multirevs'], _('Specifying Multiple Revisions'), loaddoc('multirevs')),
    (['diffs'], _('Diff Formats'), loaddoc('diffs')),
    (['templating', 'templates'], _('Template Usage'), loaddoc('templates')),
    (['urls'], _('URL Paths'), loaddoc('urls')),
    (["extensions"], _("Using additional features"), extshelp),
)
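
Usage note for helptable above: each entry is a triple of (alias list,
translated section title, doc callable), where the callable is either a
delayed loader built by loaddoc() or extshelp. A minimal sketch of looking a
topic up by alias, assuming Python 2 with a Mercurial source checkout on
sys.path (an illustration only, not how hg wires this up internally):

    from mercurial import help

    def topicdoc(name):
        # scan the (aliases, header, doc) triples for a matching alias
        for aliases, header, doc in help.helptable:
            if name in aliases:
                return '%s\n\n%s' % (header, doc())
        return None

    print topicdoc('env')  # 'env' aliases the Environment Variables topic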

@@ -1,341 +1,340 @@ mercurial/tags.py
1 | # tags.py - read tag info from local repository |
|
1 | # tags.py - read tag info from local repository | |
2 | # |
|
2 | # | |
3 | # Copyright 2009 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2009 Matt Mackall <mpm@selenic.com> | |
4 | # Copyright 2009 Greg Ward <greg@gerg.ca> |
|
4 | # Copyright 2009 Greg Ward <greg@gerg.ca> | |
5 | # |
|
5 | # | |
6 | # This software may be used and distributed according to the terms of the |
|
6 | # This software may be used and distributed according to the terms of the | |
7 | # GNU General Public License version 2, incorporated herein by reference. |
|
7 | # GNU General Public License version 2, incorporated herein by reference. | |
8 |
|
8 | |||
9 | # Currently this module only deals with reading and caching tags. |
|
9 | # Currently this module only deals with reading and caching tags. | |
10 | # Eventually, it could take care of updating (adding/removing/moving) |
|
10 | # Eventually, it could take care of updating (adding/removing/moving) | |
11 | # tags too. |
|
11 | # tags too. | |
12 |
|
12 | |||
13 | import os |
|
|||
14 | from node import nullid, bin, hex, short |
|
13 | from node import nullid, bin, hex, short | |
15 | from i18n import _ |
|
14 | from i18n import _ | |
16 | import encoding |
|
15 | import encoding | |
17 | import error |
|
16 | import error | |
18 |
|
17 | |||
19 | def _debugalways(ui, *msg): |
|
18 | def _debugalways(ui, *msg): | |
20 | ui.write(*msg) |
|
19 | ui.write(*msg) | |
21 |
|
20 | |||
22 | def _debugconditional(ui, *msg): |
|
21 | def _debugconditional(ui, *msg): | |
23 | ui.debug(*msg) |
|
22 | ui.debug(*msg) | |
24 |
|
23 | |||
25 | def _debugnever(ui, *msg): |
|
24 | def _debugnever(ui, *msg): | |
26 | pass |
|
25 | pass | |
27 |
|
26 | |||
28 | _debug = _debugalways |
|
27 | _debug = _debugalways | |
29 | _debug = _debugnever |
|
28 | _debug = _debugnever | |
30 |
|
29 | |||
31 | def findglobaltags1(ui, repo, alltags, tagtypes): |
|
30 | def findglobaltags1(ui, repo, alltags, tagtypes): | |
32 | '''Find global tags in repo by reading .hgtags from every head that |
|
31 | '''Find global tags in repo by reading .hgtags from every head that | |
33 | has a distinct version of it. Updates the dicts alltags, tagtypes |
|
32 | has a distinct version of it. Updates the dicts alltags, tagtypes | |
34 | in place: alltags maps tag name to (node, hist) pair (see _readtags() |
|
33 | in place: alltags maps tag name to (node, hist) pair (see _readtags() | |
35 | below), and tagtypes maps tag name to tag type ('global' in this |
|
34 | below), and tagtypes maps tag name to tag type ('global' in this | |
36 | case).''' |
|
35 | case).''' | |
37 |
|
36 | |||
38 | seen = set() |
|
37 | seen = set() | |
39 | fctx = None |
|
38 | fctx = None | |
40 | ctxs = [] # list of filectx |
|
39 | ctxs = [] # list of filectx | |
41 | for node in repo.heads(): |
|
40 | for node in repo.heads(): | |
42 | try: |
|
41 | try: | |
43 | fnode = repo[node].filenode('.hgtags') |
|
42 | fnode = repo[node].filenode('.hgtags') | |
44 | except error.LookupError: |
|
43 | except error.LookupError: | |
45 | continue |
|
44 | continue | |
46 | if fnode not in seen: |
|
45 | if fnode not in seen: | |
47 | seen.add(fnode) |
|
46 | seen.add(fnode) | |
48 | if not fctx: |
|
47 | if not fctx: | |
49 | fctx = repo.filectx('.hgtags', fileid=fnode) |
|
48 | fctx = repo.filectx('.hgtags', fileid=fnode) | |
50 | else: |
|
49 | else: | |
51 | fctx = fctx.filectx(fnode) |
|
50 | fctx = fctx.filectx(fnode) | |
52 | ctxs.append(fctx) |
|
51 | ctxs.append(fctx) | |
53 |
|
52 | |||
54 | # read the tags file from each head, ending with the tip |
|
53 | # read the tags file from each head, ending with the tip | |
55 | for fctx in reversed(ctxs): |
|
54 | for fctx in reversed(ctxs): | |
56 | filetags = _readtags( |
|
55 | filetags = _readtags( | |
57 | ui, repo, fctx.data().splitlines(), fctx) |
|
56 | ui, repo, fctx.data().splitlines(), fctx) | |
58 | _updatetags(filetags, "global", alltags, tagtypes) |
|
57 | _updatetags(filetags, "global", alltags, tagtypes) | |
59 |
|
58 | |||
60 | def findglobaltags2(ui, repo, alltags, tagtypes): |
|
59 | def findglobaltags2(ui, repo, alltags, tagtypes): | |
61 | '''Same as findglobaltags1(), but with caching.''' |
|
60 | '''Same as findglobaltags1(), but with caching.''' | |
62 | # This is so we can be lazy and assume alltags contains only global |
|
61 | # This is so we can be lazy and assume alltags contains only global | |
63 | # tags when we pass it to _writetagcache(). |
|
62 | # tags when we pass it to _writetagcache(). | |
64 | assert len(alltags) == len(tagtypes) == 0, \ |
|
63 | assert len(alltags) == len(tagtypes) == 0, \ | |
65 | "findglobaltags() should be called first" |
|
64 | "findglobaltags() should be called first" | |
66 |
|
65 | |||
67 | (heads, tagfnode, cachetags, shouldwrite) = _readtagcache(ui, repo) |
|
66 | (heads, tagfnode, cachetags, shouldwrite) = _readtagcache(ui, repo) | |
68 | if cachetags is not None: |
|
67 | if cachetags is not None: | |
69 | assert not shouldwrite |
|
68 | assert not shouldwrite | |
70 | # XXX is this really 100% correct? are there oddball special |
|
69 | # XXX is this really 100% correct? are there oddball special | |
71 | # cases where a global tag should outrank a local tag but won't, |
|
70 | # cases where a global tag should outrank a local tag but won't, | |
72 | # because cachetags does not contain rank info? |
|
71 | # because cachetags does not contain rank info? | |
73 | _updatetags(cachetags, 'global', alltags, tagtypes) |
|
72 | _updatetags(cachetags, 'global', alltags, tagtypes) | |
74 | return |
|
73 | return | |
75 |
|
74 | |||
76 | _debug(ui, "reading tags from %d head(s): %s\n" |
|
75 | _debug(ui, "reading tags from %d head(s): %s\n" | |
77 | % (len(heads), map(short, reversed(heads)))) |
|
76 | % (len(heads), map(short, reversed(heads)))) | |
78 | seen = set() # set of fnode |
|
77 | seen = set() # set of fnode | |
79 | fctx = None |
|
78 | fctx = None | |
80 | for head in reversed(heads): # oldest to newest |
|
79 | for head in reversed(heads): # oldest to newest | |
81 | assert head in repo.changelog.nodemap, \ |
|
80 | assert head in repo.changelog.nodemap, \ | |
82 | "tag cache returned bogus head %s" % short(head) |
|
81 | "tag cache returned bogus head %s" % short(head) | |
83 |
|
82 | |||
84 | fnode = tagfnode.get(head) |
|
83 | fnode = tagfnode.get(head) | |
85 | if fnode and fnode not in seen: |
|
84 | if fnode and fnode not in seen: | |
86 | seen.add(fnode) |
|
85 | seen.add(fnode) | |
87 | if not fctx: |
|
86 | if not fctx: | |
88 | fctx = repo.filectx('.hgtags', fileid=fnode) |
|
87 | fctx = repo.filectx('.hgtags', fileid=fnode) | |
89 | else: |
|
88 | else: | |
90 | fctx = fctx.filectx(fnode) |
|
89 | fctx = fctx.filectx(fnode) | |
91 |
|
90 | |||
92 | filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx) |
|
91 | filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx) | |
93 | _updatetags(filetags, 'global', alltags, tagtypes) |
|
92 | _updatetags(filetags, 'global', alltags, tagtypes) | |
94 |
|
93 | |||
95 | # and update the cache (if necessary) |
|
94 | # and update the cache (if necessary) | |
96 | if shouldwrite: |
|
95 | if shouldwrite: | |
97 | _writetagcache(ui, repo, heads, tagfnode, alltags) |
|
96 | _writetagcache(ui, repo, heads, tagfnode, alltags) | |
98 |
|
97 | |||
99 | # Set this to findglobaltags1 to disable tag caching. |
|
98 | # Set this to findglobaltags1 to disable tag caching. | |
100 | findglobaltags = findglobaltags2 |
|
99 | findglobaltags = findglobaltags2 | |
101 |
|
100 | |||
def readlocaltags(ui, repo, alltags, tagtypes):
    '''Read local tags in repo.  Update alltags and tagtypes.'''
    try:
        # localtags is in the local encoding; re-encode to UTF-8 on
        # input for consistency with the rest of this module.
        data = repo.opener("localtags").read()
        filetags = _readtags(
            ui, repo, data.splitlines(), "localtags",
            recode=encoding.fromlocal)
        _updatetags(filetags, "local", alltags, tagtypes)
    except IOError:
        pass
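
# Illustration (editor's addition; the hex id below is made up): a
# localtags file uses the same "<hexnode> <name>" line format that
# _readtags expects, one tag per line, e.g.
#
#     d5cbbe2c49cee22a9fbeb9ea41daa0ac4e26b846 my-local-tag
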
def _readtags(ui, repo, lines, fn, recode=None):
    '''Read tag definitions from a file (or any source of lines).
    Return a mapping from tag name to (node, hist): node is the node id
    from the last line read for that name, and hist is the list of node
    ids previously associated with it (in file order).  All node ids are
    binary, not hex.'''

    filetags = {}               # map tag name to (node, hist)
    count = 0

    def warn(msg):
        ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

    for line in lines:
        count += 1
        if not line:
            continue
        try:
            (nodehex, name) = line.split(" ", 1)
        except ValueError:
            warn(_("cannot parse entry"))
            continue
        name = name.strip()
        if recode:
            name = recode(name)
        try:
            nodebin = bin(nodehex)
        except TypeError:
            warn(_("node '%s' is not well formed") % nodehex)
            continue
        if nodebin not in repo.changelog.nodemap:
            # silently ignore as pull -r might cause this
            continue

        # update filetags
        hist = []
        if name in filetags:
            n, hist = filetags[name]
            hist.append(n)
        filetags[name] = (nodebin, hist)
    return filetags
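
# Worked example (editor's addition; the node ids are hypothetical): given
# a .hgtags that tags 'v1.0' twice, _readtags keeps the last binding and
# pushes the earlier one onto hist.  Assuming both nodes exist in the
# changelog,
#
#     aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa v1.0
#     bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb v1.0
#
# yields {'v1.0': (bin('bbbb...'), [bin('aaaa...')])}.
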
def _updatetags(filetags, tagtype, alltags, tagtypes):
    '''Incorporate the tag info read from one file into the two
    dictionaries, alltags and tagtypes, that contain all tag
    info (global across all heads plus local).'''

    for name, nodehist in filetags.iteritems():
        if name not in alltags:
            alltags[name] = nodehist
            tagtypes[name] = tagtype
            continue

        # we prefer alltags[name] if:
        #  it supersedes us OR
        #  mutual supersedes and it has a higher rank
        # otherwise we win because we're tip-most
        anode, ahist = nodehist
        bnode, bhist = alltags[name]
        if (bnode != anode and anode in bhist and
            (bnode not in ahist or len(bhist) > len(ahist))):
            anode = bnode
        ahist.extend([n for n in bhist if n not in ahist])
        alltags[name] = anode, ahist
        tagtypes[name] = tagtype
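
# Worked example (editor's addition; n1 and n2 are hypothetical nodes):
# say one head's .hgtags binds v1 -> n1, while a newer head's binds
# v1 -> n1 and then v1 -> n2, arriving here as (n2, [n1]).  In either
# read order, n1 sits in the history of the (n2, [n1]) entry, so the
# superseding node wins and alltags['v1'] ends up as (n2, [n1]).
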
# The tag cache only stores info about heads, not the tag contents
# from each head.  I.e. it doesn't try to squeeze out the maximum
# performance, but is simpler and has a better chance of actually
# working correctly.  And this gives the biggest performance win: it
# avoids looking up .hgtags in the manifest for every head, and it
# can avoid calling heads() at all if there have been no changes to
# the repo.

def _readtagcache(ui, repo):
    '''Read the tag cache and return a tuple (heads, fnodes, cachetags,
    shouldwrite).  If the cache is completely up-to-date, cachetags is a
    dict of the form returned by _readtags(); otherwise, it is None and
    heads and fnodes are set.  In that case, heads is the list of all
    heads currently in the repository (ordered from tip to oldest) and
    fnodes is a mapping from head to .hgtags filenode.  If those two are
    set, caller is responsible for reading tag info from each head.'''

    try:
        cachefile = repo.opener('tags.cache', 'r')
        _debug(ui, 'reading tag cache from %s\n' % cachefile.name)
    except IOError:
        cachefile = None

    # The cache file consists of lines like
    #   <headrev> <headnode> [<tagnode>]
    # where <headrev> and <headnode> redundantly identify a repository
    # head from the time the cache was written, and <tagnode> is the
    # filenode of .hgtags on that head.  Heads with no .hgtags file will
    # have no <tagnode>.  The cache is ordered from tip to oldest (which
    # is part of why <headrev> is there: a quick visual check is all
    # that's required to ensure correct order).
    #
    # This information is enough to let us avoid the most expensive part
    # of finding global tags, which is looking up <tagnode> in the
    # manifest for each head.
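    #
    # Editor's illustration (not in the original file; the revs and
    # truncated hex ids are made up): a head section might read
    #     4385 a21f... c36d...        <- tip, which has a .hgtags file
    #     4380 90f1...                <- head with no .hgtags file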
    cacherevs = []                      # list of headrev
    cacheheads = []                     # list of headnode
    cachefnode = {}                     # map headnode to filenode
    if cachefile:
        for line in cachefile:
            if line == "\n":
                break
            line = line.rstrip().split()
            cacherevs.append(int(line[0]))
            headnode = bin(line[1])
            cacheheads.append(headnode)
            if len(line) == 3:
                fnode = bin(line[2])
                cachefnode[headnode] = fnode

    tipnode = repo.changelog.tip()
    tiprev = len(repo.changelog) - 1

    # Case 1 (common): tip is the same, so nothing has changed.
    # (Unchanged tip trivially means no changesets have been added.
    # But, thanks to localrepository.destroyed(), it also means none
    # have been destroyed by strip or rollback.)
    if cacheheads and cacheheads[0] == tipnode and cacherevs[0] == tiprev:
        _debug(ui, "tag cache: tip unchanged\n")
        tags = _readtags(ui, repo, cachefile, cachefile.name)
        cachefile.close()
        return (None, None, tags, False)
    if cachefile:
        cachefile.close()               # ignore rest of file

    repoheads = repo.heads()
    # Case 2 (uncommon): empty repo; get out quickly and don't bother
    # writing an empty cache.
    if repoheads == [nullid]:
        return ([], {}, {}, False)

    # Case 3 (uncommon): cache file missing or empty.
    if not cacheheads:
        _debug(ui, 'tag cache: cache file missing or empty\n')

    # Case 4 (uncommon): tip rev decreased.  This should only happen
    # when we're called from localrepository.destroyed().  Refresh the
    # cache so future invocations will not see disappeared heads in the
    # cache.
    elif cacheheads and tiprev < cacherevs[0]:
        _debug(ui,
               'tag cache: tip rev decremented (from %d to %d), '
               'so we must be destroying nodes\n'
               % (cacherevs[0], tiprev))

    # Case 5 (common): tip has changed, so we've added/replaced heads.
    else:
        _debug(ui,
               'tag cache: tip has changed (%d:%s); must find new heads\n'
               % (tiprev, short(tipnode)))

    # Luckily, the code to handle cases 3, 4, 5 is the same.  So the
    # above if/elif/else can disappear once we're confident this thing
    # actually works and we don't need the debug output.

    # N.B. in case 4 (nodes destroyed), "new head" really means "newly
    # exposed".
    newheads = [head
                for head in repoheads
                if head not in set(cacheheads)]
    _debug(ui, 'tag cache: found %d head(s) not in cache: %s\n'
           % (len(newheads), map(short, newheads)))

    # Now we have to look up the .hgtags filenode for every new head.
    # This is the most expensive part of finding tags, so performance
    # depends primarily on the size of newheads.  Worst case: no cache
    # file, so newheads == repoheads.
    for head in newheads:
        cctx = repo[head]
        try:
            fnode = cctx.filenode('.hgtags')
            cachefnode[head] = fnode
        except error.LookupError:
            # no .hgtags file on this head
            pass

    # Caller has to iterate over all heads, but can use the filenodes in
    # cachefnode to get to each .hgtags revision quickly.
    return (repoheads, cachefnode, None, True)

def _writetagcache(ui, repo, heads, tagfnode, cachetags):
    '''Write the tag cache file: one "<headrev> <headnode> [<tagnode>]"
    line per head, then a blank line, then one "<hexnode> <name>" line
    per cached tag.'''

    try:
        cachefile = repo.opener('tags.cache', 'w', atomictemp=True)
    except (OSError, IOError):
        return
    _debug(ui, 'writing cache file %s\n' % cachefile.name)

    realheads = repo.heads()            # for sanity checks below
    for head in heads:
        # temporary sanity checks; these can probably be removed
        # once this code has been in crew for a few weeks
        assert head in repo.changelog.nodemap, \
            'trying to write non-existent node %s to tag cache' % short(head)
        assert head in realheads, \
            'trying to write non-head %s to tag cache' % short(head)
        assert head != nullid, \
            'trying to write nullid to tag cache'

        # This can't fail because of the first assert above.  When/if we
        # remove that assert, we might want to catch LookupError here
        # and downgrade it to a warning.
        rev = repo.changelog.rev(head)

        fnode = tagfnode.get(head)
        if fnode:
            cachefile.write('%d %s %s\n' % (rev, hex(head), hex(fnode)))
        else:
            cachefile.write('%d %s\n' % (rev, hex(head)))

    # Tag names in the cache are in UTF-8 -- which is the whole reason
    # we keep them in UTF-8 throughout this module.  If we converted
    # them to the local encoding on input, we would lose info writing
    # them to the cache.
    cachefile.write('\n')
    for (name, (node, hist)) in cachetags.iteritems():
        cachefile.write("%s %s\n" % (hex(node), name))

    cachefile.rename()
    cachefile.close()
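
# End-to-end illustration (editor's addition; revs and truncated hex ids
# are hypothetical): a complete tags.cache as written by the function
# above -- head lines, a blank separator, then the tag lines that case 1
# of _readtagcache feeds back into _readtags:
#
#     4385 a21f... c36d...
#     4380 90f1...
#                                 <- blank separator line
#     77e3... v1.1
#     09b2... v1.0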