kill trailing whitespace
Dirkjan Ochtman
r9312:c5f0825c default
@@ -1,339 +1,338
# tags.py - read tag info from local repository
#
# Copyright 2009 Matt Mackall <mpm@selenic.com>
# Copyright 2009 Greg Ward <greg@gerg.ca>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.

# Currently this module only deals with reading and caching tags.
# Eventually, it could take care of updating (adding/removing/moving)
# tags too.

import os
from node import nullid, bin, hex, short
from i18n import _
import encoding
import error

def _debugalways(ui, *msg):
    ui.write(*msg)

def _debugconditional(ui, *msg):
    ui.debug(*msg)

def _debugnever(ui, *msg):
    pass

_debug = _debugalways
_debug = _debugnever
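# The two assignments above act as a crude switch: the last one wins, so the
# module is silent by default. Point _debug at _debugalways (or
# _debugconditional) while developing to see the debug messages below.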

def findglobaltags1(ui, repo, alltags, tagtypes):
    '''Find global tags in repo by reading .hgtags from every head that
    has a distinct version of it. Updates the dicts alltags, tagtypes
    in place: alltags maps tag name to (node, hist) pair (see _readtags()
    below), and tagtypes maps tag name to tag type ('global' in this
    case).'''

    seen = set()
    fctx = None
    ctxs = []                   # list of filectx
    for node in repo.heads():
        try:
            fnode = repo[node].filenode('.hgtags')
        except error.LookupError:
            continue
        if fnode not in seen:
            seen.add(fnode)
            if not fctx:
                fctx = repo.filectx('.hgtags', fileid=fnode)
            else:
                fctx = fctx.filectx(fnode)
            ctxs.append(fctx)

    # read the tags file from each head, ending with the tip
    for fctx in reversed(ctxs):
        filetags = _readtags(
            ui, repo, fctx.data().splitlines(), fctx)
        _updatetags(filetags, "global", alltags, tagtypes)

def findglobaltags2(ui, repo, alltags, tagtypes):
    '''Same as findglobaltags1(), but with caching.'''
    # This is so we can be lazy and assume alltags contains only global
    # tags when we pass it to _writetagcache().
    assert len(alltags) == len(tagtypes) == 0, \
           "findglobaltags() should be called first"

    (heads, tagfnode, cachetags, shouldwrite) = _readtagcache(ui, repo)
    if cachetags is not None:
        assert not shouldwrite
        # XXX is this really 100% correct? are there oddball special
        # cases where a global tag should outrank a local tag but won't,
        # because cachetags does not contain rank info?
        _updatetags(cachetags, 'global', alltags, tagtypes)
        return

    _debug(ui, "reading tags from %d head(s): %s\n"
           % (len(heads), map(short, reversed(heads))))
    seen = set()                # set of fnode
    fctx = None
    for head in reversed(heads):        # oldest to newest
        assert head in repo.changelog.nodemap, \
               "tag cache returned bogus head %s" % short(head)

        fnode = tagfnode.get(head)
        if fnode and fnode not in seen:
            seen.add(fnode)
            if not fctx:
                fctx = repo.filectx('.hgtags', fileid=fnode)
            else:
                fctx = fctx.filectx(fnode)

            filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
            _updatetags(filetags, 'global', alltags, tagtypes)

    # and update the cache (if necessary)
    if shouldwrite:
        _writetagcache(ui, repo, heads, tagfnode, alltags)

# Set this to findglobaltags1 to disable tag caching.
findglobaltags = findglobaltags2

def readlocaltags(ui, repo, alltags, tagtypes):
    '''Read local tags in repo. Update alltags and tagtypes.'''
    try:
        # localtags is in the local encoding; re-encode to UTF-8 on
        # input for consistency with the rest of this module.
        data = repo.opener("localtags").read()
        filetags = _readtags(
            ui, repo, data.splitlines(), "localtags",
            recode=encoding.fromlocal)
        _updatetags(filetags, "local", alltags, tagtypes)
    except IOError:
        pass

def _readtags(ui, repo, lines, fn, recode=None):
    '''Read tag definitions from a file (or any source of lines).
    Return a mapping from tag name to (node, hist): node is the node id
    from the last line read for that name, and hist is the list of node
    ids previously associated with it (in file order). All node ids are
    binary, not hex.'''
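    # For illustration (hypothetical, truncated node ids): given the lines
    #   "6a3c...e1 release-1.0" and "0f4b...9d release-1.0"
    # this returns {'release-1.0': (bin('0f4b...9d'), [bin('6a3c...e1')])},
    # i.e. the node from the last line wins and earlier nodes end up in hist.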

    filetags = {}               # map tag name to (node, hist)
    count = 0

    def warn(msg):
        ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

    for line in lines:
        count += 1
        if not line:
            continue
        try:
            (nodehex, name) = line.split(" ", 1)
        except ValueError:
            warn(_("cannot parse entry"))
            continue
        name = name.strip()
        if recode:
            name = recode(name)
        try:
            nodebin = bin(nodehex)
        except TypeError:
            warn(_("node '%s' is not well formed") % nodehex)
            continue
        if nodebin not in repo.changelog.nodemap:
            # silently ignore as pull -r might cause this
            continue

        # update filetags
        hist = []
        if name in filetags:
            n, hist = filetags[name]
            hist.append(n)
        filetags[name] = (nodebin, hist)
    return filetags

def _updatetags(filetags, tagtype, alltags, tagtypes):
    '''Incorporate the tag info read from one file into the two
    dictionaries, alltags and tagtypes, that contain all tag
    info (global across all heads plus local).'''

    for name, nodehist in filetags.iteritems():
        if name not in alltags:
            alltags[name] = nodehist
            tagtypes[name] = tagtype
            continue

        # we prefer alltags[name] if:
        #  it supersedes us OR
        #  mutual supersedes and it has a higher rank
        # otherwise we win because we're tip-most
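        # Example (hypothetical): an older head's .hgtags moved "stable"
        # from node A to node B, so alltags holds (B, [A]); the file being
        # read now still says A with no history. A is in bhist and B is not
        # in ahist, so B is kept even though the current file is tip-most.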
        anode, ahist = nodehist
        bnode, bhist = alltags[name]
        if (bnode != anode and anode in bhist and
            (bnode not in ahist or len(bhist) > len(ahist))):
            anode = bnode
        ahist.extend([n for n in bhist if n not in ahist])
        alltags[name] = anode, ahist
        tagtypes[name] = tagtype


# The tag cache only stores info about heads, not the tag contents
# from each head. I.e. it doesn't try to squeeze out the maximum
# performance, but is simpler and has a better chance of actually
# working correctly. And this gives the biggest performance win: it
# avoids looking up .hgtags in the manifest for every head, and it
# can avoid calling heads() at all if there have been no changes to
# the repo.

def _readtagcache(ui, repo):
    '''Read the tag cache and return a tuple (heads, fnodes, cachetags,
    shouldwrite). If the cache is completely up-to-date, cachetags is a
    dict of the form returned by _readtags(); otherwise, it is None and
    heads and fnodes are set. In that case, heads is the list of all
    heads currently in the repository (ordered from tip to oldest) and
    fnodes is a mapping from head to .hgtags filenode. If those two are
    set, caller is responsible for reading tag info from each head.'''

    try:
        cachefile = repo.opener('tags.cache', 'r')
        _debug(ui, 'reading tag cache from %s\n' % cachefile.name)
    except IOError:
        cachefile = None

    # The cache file consists of lines like
    #   <headrev> <headnode> [<tagnode>]
    # where <headrev> and <headnode> redundantly identify a repository
    # head from the time the cache was written, and <tagnode> is the
    # filenode of .hgtags on that head. Heads with no .hgtags file will
    # have no <tagnode>. The cache is ordered from tip to oldest (which
    # is part of why <headrev> is there: a quick visual check is all
    # that's required to ensure correct order).
    #
    # This information is enough to let us avoid the most expensive part
    # of finding global tags, which is looking up <tagnode> in the
    # manifest for each head.
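    # For example (made-up revs, truncated node ids), a cache written by
    # _writetagcache() below could look like:
    #   4384 2b6c...a1 8f3d...07
    #   4371 91e0...5c
    #
    #   1c2f...9b release-1.3
    # i.e. one line per head from tip to oldest, then a blank separator
    # line, then the cached tag entries themselves.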
    cacherevs = []              # list of headrev
    cacheheads = []             # list of headnode
    cachefnode = {}             # map headnode to filenode
    if cachefile:
        for line in cachefile:
            if line == "\n":
                break
            line = line.rstrip().split()
            cacherevs.append(int(line[0]))
            headnode = bin(line[1])
            cacheheads.append(headnode)
            if len(line) == 3:
                fnode = bin(line[2])
                cachefnode[headnode] = fnode

    tipnode = repo.changelog.tip()
    tiprev = len(repo.changelog) - 1

    # Case 1 (common): tip is the same, so nothing has changed.
    # (Unchanged tip trivially means no changesets have been added.
    # But, thanks to localrepository.destroyed(), it also means none
    # have been destroyed by strip or rollback.)
    if cacheheads and cacheheads[0] == tipnode and cacherevs[0] == tiprev:
        _debug(ui, "tag cache: tip unchanged\n")
        tags = _readtags(ui, repo, cachefile, cachefile.name)
        cachefile.close()
        return (None, None, tags, False)
    if cachefile:
        cachefile.close()               # ignore rest of file

    repoheads = repo.heads()
    # Case 2 (uncommon): empty repo; get out quickly and don't bother
    # writing an empty cache.
    if repoheads == [nullid]:
        return ([], {}, {}, False)

    # Case 3 (uncommon): cache file missing or empty.
    if not cacheheads:
        _debug(ui, 'tag cache: cache file missing or empty\n')

    # Case 4 (uncommon): tip rev decreased. This should only happen
    # when we're called from localrepository.destroyed(). Refresh the
    # cache so future invocations will not see disappeared heads in the
    # cache.
    elif cacheheads and tiprev < cacherevs[0]:
        _debug(ui,
               'tag cache: tip rev decremented (from %d to %d), '
               'so we must be destroying nodes\n'
               % (cacherevs[0], tiprev))

    # Case 5 (common): tip has changed, so we've added/replaced heads.
    else:
        _debug(ui,
               'tag cache: tip has changed (%d:%s); must find new heads\n'
               % (tiprev, short(tipnode)))

    # Luckily, the code to handle cases 3, 4, 5 is the same. So the
    # above if/elif/else can disappear once we're confident this thing
    # actually works and we don't need the debug output.

    # N.B. in case 4 (nodes destroyed), "new head" really means "newly
    # exposed".
    newheads = [head
                for head in repoheads
                if head not in set(cacheheads)]
    _debug(ui, 'tag cache: found %d head(s) not in cache: %s\n'
           % (len(newheads), map(short, newheads)))

    # Now we have to lookup the .hgtags filenode for every new head.
    # This is the most expensive part of finding tags, so performance
    # depends primarily on the size of newheads. Worst case: no cache
    # file, so newheads == repoheads.
    for head in newheads:
        cctx = repo[head]
        try:
            fnode = cctx.filenode('.hgtags')
            cachefnode[head] = fnode
        except error.LookupError:
            # no .hgtags file on this head
            pass

    # Caller has to iterate over all heads, but can use the filenodes in
    # cachefnode to get to each .hgtags revision quickly.
    return (repoheads, cachefnode, None, True)

def _writetagcache(ui, repo, heads, tagfnode, cachetags):

    cachefile = repo.opener('tags.cache', 'w', atomictemp=True)
    _debug(ui, 'writing cache file %s\n' % cachefile.name)

    realheads = repo.heads()            # for sanity checks below
    for head in heads:
        # temporary sanity checks; these can probably be removed
        # once this code has been in crew for a few weeks
        assert head in repo.changelog.nodemap, \
               'trying to write non-existent node %s to tag cache' % short(head)
        assert head in realheads, \
               'trying to write non-head %s to tag cache' % short(head)
        assert head != nullid, \
               'trying to write nullid to tag cache'

        # This can't fail because of the first assert above. When/if we
        # remove that assert, we might want to catch LookupError here
        # and downgrade it to a warning.
        rev = repo.changelog.rev(head)

        fnode = tagfnode.get(head)
        if fnode:
            cachefile.write('%d %s %s\n' % (rev, hex(head), hex(fnode)))
        else:
            cachefile.write('%d %s\n' % (rev, hex(head)))

    # Tag names in the cache are in UTF-8 -- which is the whole reason
    # we keep them in UTF-8 throughout this module. If we converted
    # them to local encoding on input, we would lose info writing them to
    # the cache.
    cachefile.write('\n')
    for (name, (node, hist)) in cachetags.iteritems():
        cachefile.write("%s %s\n" % (hex(node), name))

    cachefile.rename()
    cachefile.close()
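# For orientation only (a sketch, not part of this change): a caller such as
# localrepository builds its tag map roughly like
#   alltags, tagtypes = {}, {}
#   findglobaltags(ui, repo, alltags, tagtypes)
#   readlocaltags(ui, repo, alltags, tagtypes)
# after which alltags maps tag name -> (binary node, history) and tagtypes
# maps tag name -> 'global' or 'local'.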