tags: use absolute_import
Gregory Szorc
r25982:b2f3f185 default
@@ -1,553 +1,565 @@
# tags.py - read tag info from local repository
#
# Copyright 2009 Matt Mackall <mpm@selenic.com>
# Copyright 2009 Greg Ward <greg@gerg.ca>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# Currently this module only deals with reading and caching tags.
# Eventually, it could take care of updating (adding/removing/moving)
# tags too.

-from node import nullid, bin, hex, short
-from i18n import _
-import util
-import encoding
-import error
-from array import array
+from __future__ import absolute_import
+
+import array
import errno
import time

+from .i18n import _
+from .node import (
+    bin,
+    hex,
+    nullid,
+    short,
+)
+from . import (
+    encoding,
+    error,
+    util,
+)
+
+array = array.array
+
# Tags computation can be expensive and caches exist to make it fast in
# the common case.
#
# The "hgtagsfnodes1" cache file caches the .hgtags filenode values for
# each revision in the repository. The file is effectively an array of
# fixed length records. Read the docs for "hgtagsfnodescache" for technical
# details.
#
# The .hgtags filenode cache grows in proportion to the length of the
# changelog. The file is truncated when the changelog is stripped.
#
# The purpose of the filenode cache is to avoid the most expensive part
# of finding global tags, which is looking up the .hgtags filenode in the
# manifest for each head. This can take dozens of milliseconds or over
# 100ms for repositories with very large manifests. Multiplied across
# dozens or even hundreds of heads, this becomes a significant performance
# concern.
#
# There also exists a separate cache file for each repository filter.
# These "tags-*" files store information about the history of tags.
#
# The tags cache files consist of a cache validation line followed by
# a history of tags.
#
# The cache validation line has the format:
#
#   <tiprev> <tipnode> [<filteredhash>]
#
# <tiprev> is an integer revision and <tipnode> is a 40 character hex
# node for that changeset. These redundantly identify the repository
# tip from the time the cache was written. In addition, <filteredhash>,
# if present, is a 40 character hex hash of the contents of the filtered
# revisions for this filter. If the set of filtered revs changes, the
# hash will change and invalidate the cache.
#
# The history part of the tags cache consists of lines of the form:
#
#   <node> <tag>
#
# (This format is identical to that of .hgtags files.)
#
# <tag> is the tag name and <node> is the 40 character hex changeset
# the tag is associated with.
#
# Tags are written sorted by tag name.
#
# Tags associated with multiple changesets have an entry for each changeset.
# The most recent changeset (in terms of revlog ordering for the head
# setting it) for each tag is last.

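For illustration, the validation line can be pulled apart the same way _readtagcache does further down; the revision number and hex digests below are made-up placeholders, and binascii stands in for the node helpers the real code uses:

import binascii

validline = ('42 ' + 'ab' * 20 + ' ' + 'cd' * 20).split()
cacherev = int(validline[0])                  # tip revision, e.g. 42
cachenode = binascii.unhexlify(validline[1])  # 20-byte binary tip node
cachehash = None
if len(validline) > 2:
    cachehash = binascii.unhexlify(validline[2])  # filteredhash, if present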
def findglobaltags(ui, repo, alltags, tagtypes):
    '''Find global tags in a repo.

    "alltags" maps tag name to (node, hist) 2-tuples.

    "tagtypes" maps tag name to tag type. Global tags always have the
    "global" tag type.

    The "alltags" and "tagtypes" dicts are updated in place. Empty dicts
    should be passed in.

    The tags cache is read and updated as a side-effect of calling.
    '''
    # This is so we can be lazy and assume alltags contains only global
    # tags when we pass it to _writetagcache().
    assert len(alltags) == len(tagtypes) == 0, \
           "findglobaltags() should be called first"

    (heads, tagfnode, valid, cachetags, shouldwrite) = _readtagcache(ui, repo)
    if cachetags is not None:
        assert not shouldwrite
        # XXX is this really 100% correct? are there oddball special
        # cases where a global tag should outrank a local tag but won't,
        # because cachetags does not contain rank info?
        _updatetags(cachetags, 'global', alltags, tagtypes)
        return

    seen = set() # set of fnode
    fctx = None
    for head in reversed(heads): # oldest to newest
        assert head in repo.changelog.nodemap, \
               "tag cache returned bogus head %s" % short(head)

        fnode = tagfnode.get(head)
        if fnode and fnode not in seen:
            seen.add(fnode)
            if not fctx:
                fctx = repo.filectx('.hgtags', fileid=fnode)
            else:
                fctx = fctx.filectx(fnode)

            filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
            _updatetags(filetags, 'global', alltags, tagtypes)

    # and update the cache (if necessary)
    if shouldwrite:
        _writetagcache(ui, repo, valid, alltags)

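A caller-side sketch of the contract spelled out in the docstring, assuming a ui/repo pair is already in hand (this mirrors the contract, not a specific call site):

alltags = {}   # tag name -> (node, hist), filled in place
tagtypes = {}  # tag name -> 'global' or 'local'
findglobaltags(ui, repo, alltags, tagtypes)  # must run first, on empty dicts
readlocaltags(ui, repo, alltags, tagtypes)   # merges .hg/localtags afterwards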
def readlocaltags(ui, repo, alltags, tagtypes):
    '''Read local tags in repo. Update alltags and tagtypes.'''
    try:
        data = repo.vfs.read("localtags")
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        return

    # localtags is in the local encoding; re-encode to UTF-8 on
    # input for consistency with the rest of this module.
    filetags = _readtags(
        ui, repo, data.splitlines(), "localtags",
        recode=encoding.fromlocal)

    # remove tags pointing to invalid nodes
    cl = repo.changelog
    for t in filetags.keys():
        try:
            cl.rev(filetags[t][0])
        except (LookupError, ValueError):
            del filetags[t]

    _updatetags(filetags, "local", alltags, tagtypes)

def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
    '''Read tag definitions from a file (or any source of lines).

    This function returns two sortdicts with similar information:

    - the first dict, bintaghist, contains the tag information as expected by
      the _readtags function, i.e. a mapping from tag name to (node, hist):
      - node is the node id from the last line read for that name,
      - hist is the list of node ids previously associated with it (in file
        order). All node ids are binary, not hex.

    - the second dict, hextaglines, is a mapping from tag name to a list of
      [hexnode, line number] pairs, ordered from the oldest to the newest node.

    When calcnodelines is False the hextaglines dict is not calculated (an
    empty dict is returned). This is done to improve this function's
    performance in cases where the line numbers are not needed.
    '''

    bintaghist = util.sortdict()
    hextaglines = util.sortdict()
    count = 0

    def warn(msg):
        ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

    for nline, line in enumerate(lines):
        count += 1
        if not line:
            continue
        try:
            (nodehex, name) = line.split(" ", 1)
        except ValueError:
            warn(_("cannot parse entry"))
            continue
        name = name.strip()
        if recode:
            name = recode(name)
        try:
            nodebin = bin(nodehex)
        except TypeError:
            warn(_("node '%s' is not well formed") % nodehex)
            continue

        # update filetags
        if calcnodelines:
            # map tag name to a list of line numbers
            if name not in hextaglines:
                hextaglines[name] = []
            hextaglines[name].append([nodehex, nline])
            continue
        # map tag name to (node, hist)
        if name not in bintaghist:
            bintaghist[name] = []
        bintaghist[name].append(nodebin)
    return bintaghist, hextaglines

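A small worked example of the two return shapes, with placeholder hex nodes and assuming ui and repo are available:

lines = ['%s v1.0' % ('11' * 20), '%s v1.0' % ('22' * 20)]
hist, hexlines = _readtaghist(ui, repo, lines, 'example', calcnodelines=True)
# hexlines == {'v1.0': [['11' * 20, 0], ['22' * 20, 1]]}
# hist stays empty: with calcnodelines=True each entry short-circuits the
# bintaghist bookkeeping via the "continue" above.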
def _readtags(ui, repo, lines, fn, recode=None, calcnodelines=False):
    '''Read tag definitions from a file (or any source of lines).

    Returns a mapping from tag name to (node, hist).

    "node" is the node id from the last line read for that name. "hist"
    is the list of node ids previously associated with it (in file order).
    All node ids are binary, not hex.
    '''
    filetags, nodelines = _readtaghist(ui, repo, lines, fn, recode=recode,
                                       calcnodelines=calcnodelines)
    for tag, taghist in filetags.items():
        filetags[tag] = (taghist[-1], taghist[:-1])
    return filetags

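Continuing the same hypothetical input through _readtags, which collapses each history list into a (node, hist) pair:

tags = _readtags(ui, repo, lines, 'example')
# tags['v1.0'] == (bin('22' * 20), [bin('11' * 20)])
# The node read last wins; every earlier node becomes history.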
def _updatetags(filetags, tagtype, alltags, tagtypes):
    '''Incorporate the tag info read from one file into the two
    dictionaries, alltags and tagtypes, that contain all tag
    info (global across all heads plus local).'''

    for name, nodehist in filetags.iteritems():
        if name not in alltags:
            alltags[name] = nodehist
            tagtypes[name] = tagtype
            continue

        # we prefer alltags[name] if:
        #  it supersedes us OR
        #  mutual supersedes and it has a higher rank
        # otherwise we win because we're tip-most
        anode, ahist = nodehist
        bnode, bhist = alltags[name]
        if (bnode != anode and anode in bhist and
            (bnode not in ahist or len(bhist) > len(ahist))):
            anode = bnode
        else:
            tagtypes[name] = tagtype
        ahist.extend([n for n in bhist if n not in ahist])
        alltags[name] = anode, ahist

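The precedence rule can be checked directly; the two 20-byte nodes here are hypothetical:

A, B = '\x01' * 20, '\x02' * 20
alltags = {'v1.0': (B, [A])}
tagtypes = {'v1.0': 'global'}
_updatetags({'v1.0': (A, [])}, 'global', alltags, tagtypes)
# alltags['v1.0'] is now (B, [A]): node B supersedes A (A appears in B's
# history), so the existing entry wins and only the histories are merged.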
def _filename(repo):
    """name of a tagcache file for a given repo or repoview"""
    filename = 'cache/tags2'
    if repo.filtername:
        filename = '%s-%s' % (filename, repo.filtername)
    return filename

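For instance, assuming the standard repoview filter names:

filename = _filename(repo)
# 'cache/tags2' when repo.filtername is None;
# 'cache/tags2-visible' when repo.filtername == 'visible';
# 'cache/tags2-served' when repo.filtername == 'served'.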
def _readtagcache(ui, repo):
    '''Read the tag cache.

    Returns a tuple (heads, fnodes, validinfo, cachetags, shouldwrite).

    If the cache is completely up-to-date, "cachetags" is a dict of the
    form returned by _readtags() and "heads", "fnodes", and "validinfo" are
    None and "shouldwrite" is False.

    If the cache is not up to date, "cachetags" is None. "heads" is a list
    of all heads currently in the repository, ordered from tip to oldest.
    "validinfo" is a tuple describing cache validation info. This is used
    when writing the tags cache. "fnodes" is a mapping from head to .hgtags
    filenode. "shouldwrite" is True.

    If the cache is not up to date, the caller is responsible for reading tag
    info from each returned head. (See findglobaltags().)
    '''
-    import scmutil # avoid cycle
+    from . import scmutil # avoid cycle

    try:
        cachefile = repo.vfs(_filename(repo), 'r')
        # force reading the file for static-http
        cachelines = iter(cachefile)
    except IOError:
        cachefile = None

    cacherev = None
    cachenode = None
    cachehash = None
    if cachefile:
        try:
            validline = cachelines.next()
            validline = validline.split()
            cacherev = int(validline[0])
            cachenode = bin(validline[1])
            if len(validline) > 2:
                cachehash = bin(validline[2])
        except Exception:
            # corruption of the cache, just recompute it.
            pass

    tipnode = repo.changelog.tip()
    tiprev = len(repo.changelog) - 1

    # Case 1 (common): tip is the same, so nothing has changed.
    # (Unchanged tip trivially means no changesets have been added.
    # But, thanks to localrepository.destroyed(), it also means none
    # have been destroyed by strip or rollback.)
    if (cacherev == tiprev
            and cachenode == tipnode
            and cachehash == scmutil.filteredhash(repo, tiprev)):
        tags = _readtags(ui, repo, cachelines, cachefile.name)
        cachefile.close()
        return (None, None, None, tags, False)
    if cachefile:
        cachefile.close() # ignore rest of file

    valid = (tiprev, tipnode, scmutil.filteredhash(repo, tiprev))

    repoheads = repo.heads()
    # Case 2 (uncommon): empty repo; get out quickly and don't bother
    # writing an empty cache.
    if repoheads == [nullid]:
        return ([], {}, valid, {}, False)

    # Case 3 (uncommon): cache file missing or empty.

    # Case 4 (uncommon): tip rev decreased. This should only happen
    # when we're called from localrepository.destroyed(). Refresh the
    # cache so future invocations will not see disappeared heads in the
    # cache.

    # Case 5 (common): tip has changed, so we've added/replaced heads.

    # As it happens, the code to handle cases 3, 4, 5 is the same.

    # N.B. in case 4 (nodes destroyed), "new head" really means "newly
    # exposed".
    if not len(repo.file('.hgtags')):
        # No tags have ever been committed, so we can avoid a
        # potentially expensive search.
        return ([], {}, valid, None, True)

    starttime = time.time()

    # Now we have to lookup the .hgtags filenode for every new head.
    # This is the most expensive part of finding tags, so performance
    # depends primarily on the size of newheads. Worst case: no cache
    # file, so newheads == repoheads.
    fnodescache = hgtagsfnodescache(repo.unfiltered())
    cachefnode = {}
    for head in reversed(repoheads):
        fnode = fnodescache.getfnode(head)
        if fnode != nullid:
            cachefnode[head] = fnode

    fnodescache.write()

    duration = time.time() - starttime
    ui.log('tagscache',
           '%d/%d cache hits/lookups in %0.4f '
           'seconds\n',
           fnodescache.hitcount, fnodescache.lookupcount, duration)

    # Caller has to iterate over all heads, but can use the filenodes in
    # cachefnode to get to each .hgtags revision quickly.
    return (repoheads, cachefnode, valid, None, True)

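As a quick reference, the return shapes by case, mirroring the branches above:

# Case 1, cache fresh:        (None, None, None, cachetags, False)
# Case 2, empty repo:         ([],   {},   valid, {},       False)
# No .hgtags ever committed:  ([],   {},   valid, None,     True)
# Cases 3/4/5 otherwise:      (repoheads, cachefnode, valid, None, True)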
def _writetagcache(ui, repo, valid, cachetags):
    filename = _filename(repo)
    try:
        cachefile = repo.vfs(filename, 'w', atomictemp=True)
    except (OSError, IOError):
        return

    ui.log('tagscache', 'writing .hg/%s with %d tags\n',
           filename, len(cachetags))

    if valid[2]:
        cachefile.write('%d %s %s\n' % (valid[0], hex(valid[1]), hex(valid[2])))
    else:
        cachefile.write('%d %s\n' % (valid[0], hex(valid[1])))

    # Tag names in the cache are in UTF-8 -- which is the whole reason
    # we keep them in UTF-8 throughout this module. If we converted
    # them to local encoding on input, we would lose info writing them to
    # the cache.
    for (name, (node, hist)) in sorted(cachetags.iteritems()):
        for n in hist:
            cachefile.write("%s %s\n" % (hex(n), name))
        cachefile.write("%s %s\n" % (hex(node), name))

    try:
        cachefile.close()
    except (OSError, IOError):
        pass

_fnodescachefile = 'cache/hgtagsfnodes1'
_fnodesrecsize = 4 + 20 # changeset fragment + filenode
_fnodesmissingrec = '\xff' * 24

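A sketch of the fixed-width record layout these constants define; the prefix and filenode bytes below are placeholders:

import struct

assert struct.calcsize('>4s20s') == _fnodesrecsize  # 4 + 20 = 24 bytes
rev = 7
offset = rev * _fnodesrecsize             # records are addressed by revision
record = '\x01\x02\x03\x04' + '\xaa' * 20
prefix, fnode = record[:4], record[4:]    # node fragment, .hgtags filenode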
class hgtagsfnodescache(object):
    """Persistent cache mapping revisions to .hgtags filenodes.

    The cache is an array of records. Each item in the array corresponds to
    a changelog revision. Values in the array contain the first 4 bytes of
    the node hash and the 20 bytes .hgtags filenode for that revision.

    The first 4 bytes are present as a form of verification. Repository
    stripping and rewriting may change the node at a numeric revision in the
    changelog. The changeset fragment serves as a verifier to detect
    rewriting. This logic is shared with the rev branch cache (see
    branchmap.py).

    The instance holds in memory the full cache content but entries are
    only parsed on read.

    Instances behave like lists. ``c[i]`` works where i is a rev or
    changeset node. Missing indexes are populated automatically on access.
    """
    def __init__(self, repo):
        assert repo.filtername is None

        self._repo = repo

        # Only for reporting purposes.
        self.lookupcount = 0
        self.hitcount = 0

        self._raw = array('c')

        data = repo.vfs.tryread(_fnodescachefile)
        self._raw.fromstring(data)

        # The end state of self._raw is an array that is of the exact length
        # required to hold a record for every revision in the repository.
        # We truncate or extend the array as necessary. self._dirtyoffset is
        # defined to be the start offset at which we need to write the output
        # file. This offset is also adjusted when new entries are calculated
        # for array members.
        cllen = len(repo.changelog)
        wantedlen = cllen * _fnodesrecsize
        rawlen = len(self._raw)

        self._dirtyoffset = None

        if rawlen < wantedlen:
            self._dirtyoffset = rawlen
            self._raw.extend('\xff' * (wantedlen - rawlen))
        elif rawlen > wantedlen:
            # There's no easy way to truncate array instances. This seems
            # slightly less evil than copying a potentially large array slice.
            for i in range(rawlen - wantedlen):
                self._raw.pop()
            self._dirtyoffset = len(self._raw)

    def getfnode(self, node, computemissing=True):
        """Obtain the filenode of the .hgtags file at a specified revision.

        If the value is in the cache, the entry will be validated and returned.
        Otherwise, the filenode will be computed and returned unless
        "computemissing" is False, in which case None will be returned without
        any potentially expensive computation being performed.

        If an .hgtags does not exist at the specified revision, nullid is
        returned.
        """
        ctx = self._repo[node]
        rev = ctx.rev()

        self.lookupcount += 1

        offset = rev * _fnodesrecsize
        record = self._raw[offset:offset + _fnodesrecsize].tostring()
        properprefix = node[0:4]

        # Validate and return existing entry.
        if record != _fnodesmissingrec:
            fileprefix = record[0:4]

            if fileprefix == properprefix:
                self.hitcount += 1
                return record[4:]

            # Fall through.

        # If we get here, the entry is either missing or invalid.

        if not computemissing:
            return None

        # Populate missing entry.
        try:
            fnode = ctx.filenode('.hgtags')
        except error.LookupError:
            # No .hgtags file on this revision.
            fnode = nullid

        self._writeentry(offset, properprefix, fnode)
        return fnode

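Typical lookups against the cache, assuming an hgtagsfnodescache instance named cache and a changectx ctx:

fnode = cache.getfnode(ctx.node())     # computes and records if missing
probe = cache.getfnode(ctx.node(), computemissing=False)
# nullid means "this revision has no .hgtags"; None from the second call
# means "not cached", with no expensive computation attempted.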
    def setfnode(self, node, fnode):
        """Set the .hgtags filenode for a given changeset."""
        assert len(fnode) == 20
        ctx = self._repo[node]

        # Do a lookup first to avoid writing if nothing has changed.
        if self.getfnode(ctx.node(), computemissing=False) == fnode:
            return

        self._writeentry(ctx.rev() * _fnodesrecsize, node[0:4], fnode)

    def _writeentry(self, offset, prefix, fnode):
        # Slices on array instances only accept other array.
        entry = array('c', prefix + fnode)
        self._raw[offset:offset + _fnodesrecsize] = entry
        # self._dirtyoffset could be None.
        self._dirtyoffset = min(self._dirtyoffset, offset) or 0

    def write(self):
        """Perform all necessary writes to cache file.

        This may no-op if no writes are needed or if a write lock could
        not be obtained.
        """
        if self._dirtyoffset is None:
            return

        data = self._raw[self._dirtyoffset:]
        if not data:
            return

        repo = self._repo

        try:
            lock = repo.wlock(wait=False)
        except error.LockError:
            repo.ui.log('tagscache',
                        'not writing .hg/%s because lock cannot be acquired\n' %
                        (_fnodescachefile))
            return

        try:
            f = repo.vfs.open(_fnodescachefile, 'ab')
            try:
                # if the file has been truncated
                actualoffset = f.tell()
                if actualoffset < self._dirtyoffset:
                    self._dirtyoffset = actualoffset
                    data = self._raw[self._dirtyoffset:]
                f.seek(self._dirtyoffset)
                f.truncate()
                repo.ui.log('tagscache',
                            'writing %d bytes to %s\n' % (
                            len(data), _fnodescachefile))
                f.write(data)
                self._dirtyoffset = None
            finally:
                f.close()
        except (IOError, OSError) as inst:
            repo.ui.log('tagscache',
                        "couldn't write %s: %s\n" % (
                        _fnodescachefile, inst))
        finally:
            lock.release()
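Putting the class together, the fill-then-persist cycle used by _readtagcache above looks like this:

cache = hgtagsfnodescache(repo.unfiltered())  # load the on-disk array
for head in reversed(repo.heads()):
    cache.getfnode(head)                      # populate any missing records
cache.write()                                 # best-effort persist under wlock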