tags: improve documentation...
Gregory Szorc
r24445:c71edbaf default
@@ -1,348 +1,391
1 # tags.py - read tag info from local repository
1 # tags.py - read tag info from local repository
2 #
2 #
3 # Copyright 2009 Matt Mackall <mpm@selenic.com>
3 # Copyright 2009 Matt Mackall <mpm@selenic.com>
4 # Copyright 2009 Greg Ward <greg@gerg.ca>
4 # Copyright 2009 Greg Ward <greg@gerg.ca>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 # Currently this module only deals with reading and caching tags.
9 # Currently this module only deals with reading and caching tags.
10 # Eventually, it could take care of updating (adding/removing/moving)
10 # Eventually, it could take care of updating (adding/removing/moving)
11 # tags too.
11 # tags too.
12
12
13 from node import nullid, bin, hex, short
13 from node import nullid, bin, hex, short
14 from i18n import _
14 from i18n import _
15 import util
15 import util
16 import encoding
16 import encoding
17 import error
17 import error
18 import errno
18 import errno
19 import time
19 import time
20
20
21 # The tags cache stores information about heads and the history of tags.
22 #
23 # The cache file consists of two parts. The first part maps head nodes
24 # to .hgtags filenodes. The second part is a history of tags. The two
25 # parts are separated by an empty line.
26 #
27 # The first part consists of lines of the form:
28 #
29 # <headrev> <headnode> [<hgtagsnode>]
30 #
31 # <headrev> is an integer revision and <headnode> is a 40 character hex
32 # node for that changeset. These redundantly identify a repository
33 # head from the time the cache was written.
34 #
35 # <hgtagsnode> is the filenode of .hgtags on that head. Heads with no .hgtags
36 # file will have no <hgtagsnode> (just 2 values per line).
37 #
38 # The filenode cache is ordered from tip to oldest (which is part of why
39 # <headrev> is there: a quick check of the tip from when the cache was
40 # written against the current tip is all that is needed to check whether
41 # the cache is up to date).
42 #
43 # The purpose of the filenode cache is to avoid the most expensive part
44 # of finding global tags, which is looking up the .hgtags filenode in the
45 # manifest for each head. This can take over a minute on repositories
46 # that have large manifests and many heads.
47 #
48 # The second part of the tags cache consists of lines of the form:
49 #
50 # <node> <tag>
51 #
52 # (This format is identical to that of .hgtags files.)
53 #
54 # <tag> is the tag name and <node> is the 40 character hex changeset
55 # the tag is associated with.
56 #
57 # Tags are written sorted by tag name.
58 #
59 # Tags associated with multiple changesets have an entry for each changeset.
60 # The most recent changeset (in terms of revlog ordering for the head
61 # setting it) for each tag is last.
62
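To make the layout described above concrete, a hypothetical cache/tags file for a repository with two heads (only the tip head carrying a .hgtags file) could look like this; every revision number, node and tag name below is fabricated for illustration:

    1005 0123456789abcdef0123456789abcdef01234567 89abcdef0123456789abcdef0123456789abcdef
    998 76543210fedcba9876543210fedcba9876543210

    fedcba9876543210fedcba9876543210fedcba98 release-1.0
    0123456789abcdef0123456789abcdef01234567 release-1.1

The first section lists heads from tip down; the second, after the empty separator line, is sorted by tag name.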
21 def findglobaltags(ui, repo, alltags, tagtypes):
63 def findglobaltags(ui, repo, alltags, tagtypes):
22 '''Find global tags in repo by reading .hgtags from every head that
64 '''Find global tags in a repo.
23 has a distinct version of it, using a cache to avoid excess work.
65
24 Updates the dicts alltags, tagtypes in place: alltags maps tag name
66 "alltags" maps tag name to (node, hist) 2-tuples.
25 to (node, hist) pair (see _readtags() below), and tagtypes maps tag
67
26 name to tag type ("global" in this case).'''
68 "tagtypes" maps tag name to tag type. Global tags always have the
69 "global" tag type.
70
71 The "alltags" and "tagtypes" dicts are updated in place. Empty dicts
72 should be passed in.
73
74 The tags cache is read and updated as a side-effect of calling this function.
75 '''
27 # This is so we can be lazy and assume alltags contains only global
76 # This is so we can be lazy and assume alltags contains only global
28 # tags when we pass it to _writetagcache().
77 # tags when we pass it to _writetagcache().
29 assert len(alltags) == len(tagtypes) == 0, \
78 assert len(alltags) == len(tagtypes) == 0, \
30 "findglobaltags() should be called first"
79 "findglobaltags() should be called first"
31
80
32 (heads, tagfnode, cachetags, shouldwrite) = _readtagcache(ui, repo)
81 (heads, tagfnode, cachetags, shouldwrite) = _readtagcache(ui, repo)
33 if cachetags is not None:
82 if cachetags is not None:
34 assert not shouldwrite
83 assert not shouldwrite
35 # XXX is this really 100% correct? are there oddball special
84 # XXX is this really 100% correct? are there oddball special
36 # cases where a global tag should outrank a local tag but won't,
85 # cases where a global tag should outrank a local tag but won't,
37 # because cachetags does not contain rank info?
86 # because cachetags does not contain rank info?
38 _updatetags(cachetags, 'global', alltags, tagtypes)
87 _updatetags(cachetags, 'global', alltags, tagtypes)
39 return
88 return
40
89
41 seen = set() # set of fnode
90 seen = set() # set of fnode
42 fctx = None
91 fctx = None
43 for head in reversed(heads): # oldest to newest
92 for head in reversed(heads): # oldest to newest
44 assert head in repo.changelog.nodemap, \
93 assert head in repo.changelog.nodemap, \
45 "tag cache returned bogus head %s" % short(head)
94 "tag cache returned bogus head %s" % short(head)
46
95
47 fnode = tagfnode.get(head)
96 fnode = tagfnode.get(head)
48 if fnode and fnode not in seen:
97 if fnode and fnode not in seen:
49 seen.add(fnode)
98 seen.add(fnode)
50 if not fctx:
99 if not fctx:
51 fctx = repo.filectx('.hgtags', fileid=fnode)
100 fctx = repo.filectx('.hgtags', fileid=fnode)
52 else:
101 else:
53 fctx = fctx.filectx(fnode)
102 fctx = fctx.filectx(fnode)
54
103
55 filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
104 filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
56 _updatetags(filetags, 'global', alltags, tagtypes)
105 _updatetags(filetags, 'global', alltags, tagtypes)
57
106
58 # and update the cache (if necessary)
107 # and update the cache (if necessary)
59 if shouldwrite:
108 if shouldwrite:
60 _writetagcache(ui, repo, heads, tagfnode, alltags)
109 _writetagcache(ui, repo, heads, tagfnode, alltags)
61
110
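A minimal usage sketch of the two entry points above, assuming an existing ui object and a loaded localrepository repo (the names ui and repo are assumptions here, not taken from a specific caller):

    alltags = {}   # tag name -> (node, hist)
    tagtypes = {}  # tag name -> 'global' or 'local'
    findglobaltags(ui, repo, alltags, tagtypes)  # must run first, on empty dicts
    readlocaltags(ui, repo, alltags, tagtypes)   # merges local tags via _updatetags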
62 def readlocaltags(ui, repo, alltags, tagtypes):
111 def readlocaltags(ui, repo, alltags, tagtypes):
63 '''Read local tags in repo. Update alltags and tagtypes.'''
112 '''Read local tags in repo. Update alltags and tagtypes.'''
64 try:
113 try:
65 data = repo.vfs.read("localtags")
114 data = repo.vfs.read("localtags")
66 except IOError, inst:
115 except IOError, inst:
67 if inst.errno != errno.ENOENT:
116 if inst.errno != errno.ENOENT:
68 raise
117 raise
69 return
118 return
70
119
71 # localtags is in the local encoding; re-encode to UTF-8 on
120 # localtags is in the local encoding; re-encode to UTF-8 on
72 # input for consistency with the rest of this module.
121 # input for consistency with the rest of this module.
73 filetags = _readtags(
122 filetags = _readtags(
74 ui, repo, data.splitlines(), "localtags",
123 ui, repo, data.splitlines(), "localtags",
75 recode=encoding.fromlocal)
124 recode=encoding.fromlocal)
76
125
77 # remove tags pointing to invalid nodes
126 # remove tags pointing to invalid nodes
78 cl = repo.changelog
127 cl = repo.changelog
79 for t in filetags.keys():
128 for t in filetags.keys():
80 try:
129 try:
81 cl.rev(filetags[t][0])
130 cl.rev(filetags[t][0])
82 except (LookupError, ValueError):
131 except (LookupError, ValueError):
83 del filetags[t]
132 del filetags[t]
84
133
85 _updatetags(filetags, "local", alltags, tagtypes)
134 _updatetags(filetags, "local", alltags, tagtypes)
86
135
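For reference, .hg/localtags uses the same one-entry-per-line layout as .hgtags but is stored in the local encoding; a hypothetical file with a single entry (fabricated node) would be:

    76543210fedcba9876543210fedcba9876543210 my-local-tag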
87 def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
136 def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
88 '''Read tag definitions from a file (or any source of lines).
137 '''Read tag definitions from a file (or any source of lines).
138
89 This function returns two sortdicts with similar information:
139 This function returns two sortdicts with similar information:
140
90 - the first dict, bintaghist, contains the tag information as expected by
141 - the first dict, bintaghist, contains the tag information as expected by
91 the _readtags function, i.e. a mapping from tag name to (node, hist):
142 the _readtags function, i.e. a mapping from tag name to (node, hist):
92 - node is the node id from the last line read for that name,
143 - node is the node id from the last line read for that name,
93 - hist is the list of node ids previously associated with it (in file
144 - hist is the list of node ids previously associated with it (in file
94 order). All node ids are binary, not hex.
145 order). All node ids are binary, not hex.
146
95 - the second dict, hextaglines, is a mapping from tag name to a list of
147 - the second dict, hextaglines, is a mapping from tag name to a list of
96 [hexnode, line number] pairs, ordered from the oldest to the newest node.
148 [hexnode, line number] pairs, ordered from the oldest to the newest node.
149
97 When calcnodelines is False the hextaglines dict is not calculated (an
150 When calcnodelines is False the hextaglines dict is not calculated (an
98 empty dict is returned). This is done to improve this function's
151 empty dict is returned). This is done to improve this function's
99 performance in cases where the line numbers are not needed.
152 performance in cases where the line numbers are not needed.
100 '''
153 '''
101
154
102 bintaghist = util.sortdict()
155 bintaghist = util.sortdict()
103 hextaglines = util.sortdict()
156 hextaglines = util.sortdict()
104 count = 0
157 count = 0
105
158
106 def warn(msg):
159 def warn(msg):
107 ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
160 ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
108
161
109 for nline, line in enumerate(lines):
162 for nline, line in enumerate(lines):
110 count += 1
163 count += 1
111 if not line:
164 if not line:
112 continue
165 continue
113 try:
166 try:
114 (nodehex, name) = line.split(" ", 1)
167 (nodehex, name) = line.split(" ", 1)
115 except ValueError:
168 except ValueError:
116 warn(_("cannot parse entry"))
169 warn(_("cannot parse entry"))
117 continue
170 continue
118 name = name.strip()
171 name = name.strip()
119 if recode:
172 if recode:
120 name = recode(name)
173 name = recode(name)
121 try:
174 try:
122 nodebin = bin(nodehex)
175 nodebin = bin(nodehex)
123 except TypeError:
176 except TypeError:
124 warn(_("node '%s' is not well formed") % nodehex)
177 warn(_("node '%s' is not well formed") % nodehex)
125 continue
178 continue
126
179
127 # update filetags
180 # update filetags
128 if calcnodelines:
181 if calcnodelines:
129 # map tag name to a list of line numbers
182 # map tag name to a list of line numbers
130 if name not in hextaglines:
183 if name not in hextaglines:
131 hextaglines[name] = []
184 hextaglines[name] = []
132 hextaglines[name].append([nodehex, nline])
185 hextaglines[name].append([nodehex, nline])
133 continue
186 continue
134 # map tag name to (node, hist)
187 # map tag name to (node, hist)
135 if name not in bintaghist:
188 if name not in bintaghist:
136 bintaghist[name] = []
189 bintaghist[name] = []
137 bintaghist[name].append(nodebin)
190 bintaghist[name].append(nodebin)
138 return bintaghist, hextaglines
191 return bintaghist, hextaglines
139
192
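A small sketch of the calcnodelines=True path. It runs in this module's namespace; the ui stub and the 40-character hex nodes are fabricated for illustration, and repo is unused by the parser, so None is passed:

    class fakeui(object):
        def warn(self, msg):
            pass  # no parse warnings expected for well-formed input

    lines = [
        "1111111111111111111111111111111111111111 stable",
        "2222222222222222222222222222222222222222 stable",
    ]
    bintaghist, hextaglines = _readtaghist(fakeui(), None, lines, "example",
                                           calcnodelines=True)
    # hextaglines == {'stable': [['1111...', 0], ['2222...', 1]]}
    # bintaghist stays empty: each iteration continues before touching it.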
140 def _readtags(ui, repo, lines, fn, recode=None, calcnodelines=False):
193 def _readtags(ui, repo, lines, fn, recode=None, calcnodelines=False):
141 '''Read tag definitions from a file (or any source of lines).
194 '''Read tag definitions from a file (or any source of lines).
142 Return a mapping from tag name to (node, hist): node is the node id
195
143 from the last line read for that name, and hist is the list of node
196 Returns a mapping from tag name to (node, hist).
144 ids previously associated with it (in file order). All node ids are
197
145 binary, not hex.'''
198 "node" is the node id from the last line read for that name. "hist"
199 is the list of node ids previously associated with it (in file order).
200 All node ids are binary, not hex.
201 '''
146 filetags, nodelines = _readtaghist(ui, repo, lines, fn, recode=recode,
202 filetags, nodelines = _readtaghist(ui, repo, lines, fn, recode=recode,
147 calcnodelines=calcnodelines)
203 calcnodelines=calcnodelines)
148 for tag, taghist in filetags.items():
204 for tag, taghist in filetags.items():
149 filetags[tag] = (taghist[-1], taghist[:-1])
205 filetags[tag] = (taghist[-1], taghist[:-1])
150 return filetags
206 return filetags
151
207
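Continuing the sketch after _readtaghist, the default calcnodelines=False path folds each tag's node list into the (node, hist) shape used throughout the module:

    tags = _readtags(fakeui(), None, lines, "example")
    node, hist = tags["stable"]
    # node is the binary form of the last line read ('2222...'), and hist is
    # the earlier nodes in file order, i.e. [bin('1111...')].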
152 def _updatetags(filetags, tagtype, alltags, tagtypes):
208 def _updatetags(filetags, tagtype, alltags, tagtypes):
153 '''Incorporate the tag info read from one file into the two
209 '''Incorporate the tag info read from one file into the two
154 dictionaries, alltags and tagtypes, that contain all tag
210 dictionaries, alltags and tagtypes, that contain all tag
155 info (global across all heads plus local).'''
211 info (global across all heads plus local).'''
156
212
157 for name, nodehist in filetags.iteritems():
213 for name, nodehist in filetags.iteritems():
158 if name not in alltags:
214 if name not in alltags:
159 alltags[name] = nodehist
215 alltags[name] = nodehist
160 tagtypes[name] = tagtype
216 tagtypes[name] = tagtype
161 continue
217 continue
162
218
163 # we prefer alltags[name] if:
219 # we prefer alltags[name] if:
164 # it supersedes us OR
220 # it supersedes us OR
165 # mutual supersedes and it has a higher rank
221 # mutual supersedes and it has a higher rank
166 # otherwise we win because we're tip-most
222 # otherwise we win because we're tip-most
167 anode, ahist = nodehist
223 anode, ahist = nodehist
168 bnode, bhist = alltags[name]
224 bnode, bhist = alltags[name]
169 if (bnode != anode and anode in bhist and
225 if (bnode != anode and anode in bhist and
170 (bnode not in ahist or len(bhist) > len(ahist))):
226 (bnode not in ahist or len(bhist) > len(ahist))):
171 anode = bnode
227 anode = bnode
172 else:
228 else:
173 tagtypes[name] = tagtype
229 tagtypes[name] = tagtype
174 ahist.extend([n for n in bhist if n not in ahist])
230 ahist.extend([n for n in bhist if n not in ahist])
175 alltags[name] = anode, ahist
231 alltags[name] = anode, ahist
176
232
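A worked example of the precedence rule, runnable in this module's namespace with fabricated 20-byte nodes:

    n1 = "\x11" * 20
    n2 = "\x22" * 20
    alltags, tagtypes = {}, {}
    _updatetags({"moving-tag": (n1, [])}, "global", alltags, tagtypes)
    # The second source moved the tag to n2 and carries n1 in its history,
    # so the existing entry does not supersede it and the newer entry wins:
    _updatetags({"moving-tag": (n2, [n1])}, "local", alltags, tagtypes)
    assert alltags["moving-tag"] == (n2, [n1])
    assert tagtypes["moving-tag"] == "local"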
233 def _readtagcache(ui, repo):
234 '''Read the tag cache.
177
235
178 # The tag cache only stores info about heads, not the tag contents
236 Returns a tuple (heads, fnodes, cachetags, shouldwrite).
179 # from each head. I.e. it doesn't try to squeeze out the maximum
237
180 # performance, but is simpler and has a better chance of actually
238 If the cache is completely up-to-date, "cachetags" is a dict of the
181 # working correctly. And this gives the biggest performance win: it
239 form returned by _readtags() and "heads" and "fnodes" are None and
182 # avoids looking up .hgtags in the manifest for every head, and it
240 "shouldwrite" is False.
183 # can avoid calling heads() at all if there have been no changes to
184 # the repo.
185
241
186 def _readtagcache(ui, repo):
242 If the cache is not up to date, "cachetags" is None. "heads" is a list
187 '''Read the tag cache and return a tuple (heads, fnodes, cachetags,
243 of all heads currently in the repository, ordered from tip to oldest.
188 shouldwrite). If the cache is completely up-to-date, cachetags is a
244 "fnodes" is a mapping from head to .hgtags filenode. "shouldwrite" is
189 dict of the form returned by _readtags(); otherwise, it is None and
245 True.
190 heads and fnodes are set. In that case, heads is the list of all
246
191 heads currently in the repository (ordered from tip to oldest) and
247 If the cache is not up to date, the caller is responsible for reading tag
192 fnodes is a mapping from head to .hgtags filenode. If those two are
248 info from each returned head. (See findglobaltags().)
193 set, caller is responsible for reading tag info from each head.'''
249 '''
194
250
195 try:
251 try:
196 cachefile = repo.vfs('cache/tags', 'r')
252 cachefile = repo.vfs('cache/tags', 'r')
197 # force reading the file for static-http
253 # force reading the file for static-http
198 cachelines = iter(cachefile)
254 cachelines = iter(cachefile)
199 except IOError:
255 except IOError:
200 cachefile = None
256 cachefile = None
201
257
202 # The cache file consists of lines like
258 cacherevs = [] # list of headrev
203 # <headrev> <headnode> [<tagnode>]
259 cacheheads = [] # list of headnode
204 # where <headrev> and <headnode> redundantly identify a repository
260 cachefnode = {} # map headnode to filenode
205 # head from the time the cache was written, and <tagnode> is the
206 # filenode of .hgtags on that head. Heads with no .hgtags file will
207 # have no <tagnode>. The cache is ordered from tip to oldest (which
208 # is part of why <headrev> is there: a quick visual check is all
209 # that's required to ensure correct order).
210 #
211 # This information is enough to let us avoid the most expensive part
212 # of finding global tags, which is looking up <tagnode> in the
213 # manifest for each head.
214 cacherevs = [] # list of headrev
215 cacheheads = [] # list of headnode
216 cachefnode = {} # map headnode to filenode
217 if cachefile:
261 if cachefile:
218 try:
262 try:
219 for line in cachelines:
263 for line in cachelines:
220 if line == "\n":
264 if line == "\n":
221 break
265 break
222 line = line.split()
266 line = line.split()
223 cacherevs.append(int(line[0]))
267 cacherevs.append(int(line[0]))
224 headnode = bin(line[1])
268 headnode = bin(line[1])
225 cacheheads.append(headnode)
269 cacheheads.append(headnode)
226 if len(line) == 3:
270 if len(line) == 3:
227 fnode = bin(line[2])
271 fnode = bin(line[2])
228 cachefnode[headnode] = fnode
272 cachefnode[headnode] = fnode
229 except Exception:
273 except Exception:
230 # corruption of the tags cache, just recompute it
274 # corruption of the tags cache, just recompute it
231 ui.warn(_('.hg/cache/tags is corrupt, rebuilding it\n'))
275 ui.warn(_('.hg/cache/tags is corrupt, rebuilding it\n'))
232 cacheheads = []
276 cacheheads = []
233 cacherevs = []
277 cacherevs = []
234 cachefnode = {}
278 cachefnode = {}
235
279
236 tipnode = repo.changelog.tip()
280 tipnode = repo.changelog.tip()
237 tiprev = len(repo.changelog) - 1
281 tiprev = len(repo.changelog) - 1
238
282
239 # Case 1 (common): tip is the same, so nothing has changed.
283 # Case 1 (common): tip is the same, so nothing has changed.
240 # (Unchanged tip trivially means no changesets have been added.
284 # (Unchanged tip trivially means no changesets have been added.
241 # But, thanks to localrepository.destroyed(), it also means none
285 # But, thanks to localrepository.destroyed(), it also means none
242 # have been destroyed by strip or rollback.)
286 # have been destroyed by strip or rollback.)
243 if cacheheads and cacheheads[0] == tipnode and cacherevs[0] == tiprev:
287 if cacheheads and cacheheads[0] == tipnode and cacherevs[0] == tiprev:
244 tags = _readtags(ui, repo, cachelines, cachefile.name)
288 tags = _readtags(ui, repo, cachelines, cachefile.name)
245 cachefile.close()
289 cachefile.close()
246 return (None, None, tags, False)
290 return (None, None, tags, False)
247 if cachefile:
291 if cachefile:
248 cachefile.close() # ignore rest of file
292 cachefile.close() # ignore rest of file
249
293
250 repoheads = repo.heads()
294 repoheads = repo.heads()
251 # Case 2 (uncommon): empty repo; get out quickly and don't bother
295 # Case 2 (uncommon): empty repo; get out quickly and don't bother
252 # writing an empty cache.
296 # writing an empty cache.
253 if repoheads == [nullid]:
297 if repoheads == [nullid]:
254 return ([], {}, {}, False)
298 return ([], {}, {}, False)
255
299
256 # Case 3 (uncommon): cache file missing or empty.
300 # Case 3 (uncommon): cache file missing or empty.
257
301
258 # Case 4 (uncommon): tip rev decreased. This should only happen
302 # Case 4 (uncommon): tip rev decreased. This should only happen
259 # when we're called from localrepository.destroyed(). Refresh the
303 # when we're called from localrepository.destroyed(). Refresh the
260 # cache so future invocations will not see disappeared heads in the
304 # cache so future invocations will not see disappeared heads in the
261 # cache.
305 # cache.
262
306
263 # Case 5 (common): tip has changed, so we've added/replaced heads.
307 # Case 5 (common): tip has changed, so we've added/replaced heads.
264
308
265 # As it happens, the code to handle cases 3, 4, 5 is the same.
309 # As it happens, the code to handle cases 3, 4, 5 is the same.
266
310
267 # N.B. in case 4 (nodes destroyed), "new head" really means "newly
311 # N.B. in case 4 (nodes destroyed), "new head" really means "newly
268 # exposed".
312 # exposed".
269 if not len(repo.file('.hgtags')):
313 if not len(repo.file('.hgtags')):
270 # No tags have ever been committed, so we can avoid a
314 # No tags have ever been committed, so we can avoid a
271 # potentially expensive search.
315 # potentially expensive search.
272 return (repoheads, cachefnode, None, True)
316 return (repoheads, cachefnode, None, True)
273
317
274 starttime = time.time()
318 starttime = time.time()
275
319
276 newheads = [head
320 newheads = [head
277 for head in repoheads
321 for head in repoheads
278 if head not in set(cacheheads)]
322 if head not in set(cacheheads)]
279
323
280 # Now we have to lookup the .hgtags filenode for every new head.
324 # Now we have to lookup the .hgtags filenode for every new head.
281 # This is the most expensive part of finding tags, so performance
325 # This is the most expensive part of finding tags, so performance
282 # depends primarily on the size of newheads. Worst case: no cache
326 # depends primarily on the size of newheads. Worst case: no cache
283 # file, so newheads == repoheads.
327 # file, so newheads == repoheads.
284 for head in reversed(newheads):
328 for head in reversed(newheads):
285 cctx = repo[head]
329 cctx = repo[head]
286 try:
330 try:
287 fnode = cctx.filenode('.hgtags')
331 fnode = cctx.filenode('.hgtags')
288 cachefnode[head] = fnode
332 cachefnode[head] = fnode
289 except error.LookupError:
333 except error.LookupError:
290 # no .hgtags file on this head
334 # no .hgtags file on this head
291 pass
335 pass
292
336
293 duration = time.time() - starttime
337 duration = time.time() - starttime
294 ui.log('tagscache',
338 ui.log('tagscache',
295 'resolved %d tags cache entries from %d manifests in %0.4f '
339 'resolved %d tags cache entries from %d manifests in %0.4f '
296 'seconds\n',
340 'seconds\n',
297 len(cachefnode), len(newheads), duration)
341 len(cachefnode), len(newheads), duration)
298
342
299 # Caller has to iterate over all heads, but can use the filenodes in
343 # Caller has to iterate over all heads, but can use the filenodes in
300 # cachefnode to get to each .hgtags revision quickly.
344 # cachefnode to get to each .hgtags revision quickly.
301 return (repoheads, cachefnode, None, True)
345 return (repoheads, cachefnode, None, True)
302
346
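The returned four-tuple is consumed roughly as follows (a sketch of the contract only; findglobaltags above is the real caller):

    heads, fnodes, cachetags, shouldwrite = _readtagcache(ui, repo)
    if cachetags is not None:
        # Cache hit (or empty repo): cachetags already has the _readtags()
        # shape and shouldwrite is False, so nothing needs recomputing.
        pass
    else:
        # Cache miss: walk heads from oldest to newest, using fnodes to reach
        # each head's .hgtags revision, then rewrite the cache (shouldwrite
        # is True).
        pass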
303 def _writetagcache(ui, repo, heads, tagfnode, cachetags):
347 def _writetagcache(ui, repo, heads, tagfnode, cachetags):
304
305 try:
348 try:
306 cachefile = repo.vfs('cache/tags', 'w', atomictemp=True)
349 cachefile = repo.vfs('cache/tags', 'w', atomictemp=True)
307 except (OSError, IOError):
350 except (OSError, IOError):
308 return
351 return
309
352
310 ui.log('tagscache', 'writing tags cache file with %d heads and %d tags\n',
353 ui.log('tagscache', 'writing tags cache file with %d heads and %d tags\n',
311 len(heads), len(cachetags))
354 len(heads), len(cachetags))
312
355
313 realheads = repo.heads() # for sanity checks below
356 realheads = repo.heads() # for sanity checks below
314 for head in heads:
357 for head in heads:
315 # temporary sanity checks; these can probably be removed
358 # temporary sanity checks; these can probably be removed
316 # once this code has been in crew for a few weeks
359 # once this code has been in crew for a few weeks
317 assert head in repo.changelog.nodemap, \
360 assert head in repo.changelog.nodemap, \
318 'trying to write non-existent node %s to tag cache' % short(head)
361 'trying to write non-existent node %s to tag cache' % short(head)
319 assert head in realheads, \
362 assert head in realheads, \
320 'trying to write non-head %s to tag cache' % short(head)
363 'trying to write non-head %s to tag cache' % short(head)
321 assert head != nullid, \
364 assert head != nullid, \
322 'trying to write nullid to tag cache'
365 'trying to write nullid to tag cache'
323
366
324 # This can't fail because of the first assert above. When/if we
367 # This can't fail because of the first assert above. When/if we
325 # remove that assert, we might want to catch LookupError here
368 # remove that assert, we might want to catch LookupError here
326 # and downgrade it to a warning.
369 # and downgrade it to a warning.
327 rev = repo.changelog.rev(head)
370 rev = repo.changelog.rev(head)
328
371
329 fnode = tagfnode.get(head)
372 fnode = tagfnode.get(head)
330 if fnode:
373 if fnode:
331 cachefile.write('%d %s %s\n' % (rev, hex(head), hex(fnode)))
374 cachefile.write('%d %s %s\n' % (rev, hex(head), hex(fnode)))
332 else:
375 else:
333 cachefile.write('%d %s\n' % (rev, hex(head)))
376 cachefile.write('%d %s\n' % (rev, hex(head)))
334
377
335 # Tag names in the cache are in UTF-8 -- which is the whole reason
378 # Tag names in the cache are in UTF-8 -- which is the whole reason
336 # we keep them in UTF-8 throughout this module. If we converted
379 # we keep them in UTF-8 throughout this module. If we converted
337 # them to local encoding on input, we would lose info writing them to
380 # them to local encoding on input, we would lose info writing them to
338 # the cache.
381 # the cache.
339 cachefile.write('\n')
382 cachefile.write('\n')
340 for (name, (node, hist)) in sorted(cachetags.iteritems()):
383 for (name, (node, hist)) in sorted(cachetags.iteritems()):
341 for n in hist:
384 for n in hist:
342 cachefile.write("%s %s\n" % (hex(n), name))
385 cachefile.write("%s %s\n" % (hex(n), name))
343 cachefile.write("%s %s\n" % (hex(node), name))
386 cachefile.write("%s %s\n" % (hex(node), name))
344
387
345 try:
388 try:
346 cachefile.close()
389 cachefile.close()
347 except (OSError, IOError):
390 except (OSError, IOError):
348 pass
391 pass
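To illustrate the ordering guarantee of the tag section written above: for a cachetags dict such as {'1.0': (n0, []), '1.1': (n2, [n1])} (fabricated nodes), the loop emits entries sorted by tag name, with each tag's history lines preceding its current node:

    <hex(n0)> 1.0
    <hex(n1)> 1.1
    <hex(n2)> 1.1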