hgtagsfnodescache: handle nullid lookup...
Author: marmoute
Changeset: r42422:2930b313 (default branch)
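This changeset adds an early return to hgtagsfnodescache.getfnode so that a lookup of the null changeset short-circuits instead of falling through to the usual record-offset computation (the three added lines 694-696 in the diff below). A minimal sketch of the resulting behaviour, assuming an already-open repository object named repo (the variable name is illustrative, not part of the change):

```python
from mercurial.node import nullid
from mercurial.tags import hgtagsfnodescache

# `repo` is assumed to be a localrepository obtained elsewhere, e.g. via
# mercurial.hg.repository(ui, path). Sketch only, not part of the changeset.
cache = hgtagsfnodescache(repo.unfiltered())

# With this change, asking for the .hgtags filenode of the null changeset
# returns nullid ("no .hgtags file here") instead of computing a record
# offset for a revision that has no slot in the cache array.
assert cache.getfnode(nullid) == nullid
```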
@@ -1,790 +1,793 @@
 # tags.py - read tag info from local repository
 #
 # Copyright 2009 Matt Mackall <mpm@selenic.com>
 # Copyright 2009 Greg Ward <greg@gerg.ca>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 # Currently this module only deals with reading and caching tags.
 # Eventually, it could take care of updating (adding/removing/moving)
 # tags too.

 from __future__ import absolute_import

 import errno

 from .node import (
     bin,
     hex,
     nullid,
     short,
 )
 from .i18n import _
 from . import (
     encoding,
     error,
     match as matchmod,
     scmutil,
     util,
 )
 from .utils import (
     stringutil,
 )

 # Tags computation can be expensive and caches exist to make it fast in
 # the common case.
 #
 # The "hgtagsfnodes1" cache file caches the .hgtags filenode values for
 # each revision in the repository. The file is effectively an array of
 # fixed length records. Read the docs for "hgtagsfnodescache" for technical
 # details.
 #
 # The .hgtags filenode cache grows in proportion to the length of the
 # changelog. The file is truncated when the changelog is stripped.
 #
 # The purpose of the filenode cache is to avoid the most expensive part
 # of finding global tags, which is looking up the .hgtags filenode in the
 # manifest for each head. This can take dozens or over 100ms for
 # repositories with very large manifests. Multiplied by dozens or even
 # hundreds of heads and there is a significant performance concern.
 #
 # There also exist a separate cache file for each repository filter.
 # These "tags-*" files store information about the history of tags.
 #
 # The tags cache files consists of a cache validation line followed by
 # a history of tags.
 #
 # The cache validation line has the format:
 #
 #   <tiprev> <tipnode> [<filteredhash>]
 #
 # <tiprev> is an integer revision and <tipnode> is a 40 character hex
 # node for that changeset. These redundantly identify the repository
 # tip from the time the cache was written. In addition, <filteredhash>,
 # if present, is a 40 character hex hash of the contents of the filtered
 # revisions for this filter. If the set of filtered revs changes, the
 # hash will change and invalidate the cache.
 #
 # The history part of the tags cache consists of lines of the form:
 #
 #   <node> <tag>
 #
 # (This format is identical to that of .hgtags files.)
 #
 # <tag> is the tag name and <node> is the 40 character hex changeset
 # the tag is associated with.
 #
 # Tags are written sorted by tag name.
 #
 # Tags associated with multiple changesets have an entry for each changeset.
 # The most recent changeset (in terms of revlog ordering for the head
 # setting it) for each tag is last.

 def fnoderevs(ui, repo, revs):
     """return the list of '.hgtags' fnodes used in a set revisions

     This is returned as list of unique fnodes. We use a list instead of a set
     because order matters when it comes to tags."""
     unfi = repo.unfiltered()
     tonode = unfi.changelog.node
     nodes = [tonode(r) for r in revs]
     fnodes = _getfnodes(ui, repo, nodes[::-1]) # reversed help the cache
     fnodes = _filterfnodes(fnodes, nodes)
     return fnodes

 def _nulltonone(value):
     """convert nullid to None

     For tag value, nullid means "deleted". This small utility function helps
     translating that to None."""
     if value == nullid:
         return None
     return value

 def difftags(ui, repo, oldfnodes, newfnodes):
     """list differences between tags expressed in two set of file-nodes

     The list contains entries in the form: (tagname, oldvalue, new value).
     None is used to expressed missing value:
         ('foo', None, 'abcd') is a new tag,
         ('bar', 'ef01', None) is a deletion,
         ('baz', 'abcd', 'ef01') is a tag movement.
     """
     if oldfnodes == newfnodes:
         return []
     oldtags = _tagsfromfnodes(ui, repo, oldfnodes)
     newtags = _tagsfromfnodes(ui, repo, newfnodes)

     # list of (tag, old, new): None means missing
     entries = []
     for tag, (new, __) in newtags.items():
         new = _nulltonone(new)
         old, __ = oldtags.pop(tag, (None, None))
         old = _nulltonone(old)
         if old != new:
             entries.append((tag, old, new))
     # handle deleted tags
     for tag, (old, __) in oldtags.items():
         old = _nulltonone(old)
         if old is not None:
             entries.append((tag, old, None))
     entries.sort()
     return entries

 def writediff(fp, difflist):
     """write tags diff information to a file.

     Data are stored with a line based format:

         <action> <hex-node> <tag-name>\n

     Action are defined as follow:
        -R tag is removed,
        +A tag is added,
        -M tag is moved (old value),
        +M tag is moved (new value),

     Example:

          +A 875517b4806a848f942811a315a5bce30804ae85 t5

     See documentation of difftags output for details about the input.
     """
     add = '+A %s %s\n'
     remove = '-R %s %s\n'
     updateold = '-M %s %s\n'
     updatenew = '+M %s %s\n'
     for tag, old, new in difflist:
         # translate to hex
         if old is not None:
             old = hex(old)
         if new is not None:
             new = hex(new)
         # write to file
         if old is None:
             fp.write(add % (new, tag))
         elif new is None:
             fp.write(remove % (old, tag))
         else:
             fp.write(updateold % (old, tag))
             fp.write(updatenew % (new, tag))

 def findglobaltags(ui, repo):
     '''Find global tags in a repo: return a tagsmap

     tagsmap: tag name to (node, hist) 2-tuples.

     The tags cache is read and updated as a side-effect of calling.
     '''
     (heads, tagfnode, valid, cachetags, shouldwrite) = _readtagcache(ui, repo)
     if cachetags is not None:
         assert not shouldwrite
         # XXX is this really 100% correct? are there oddball special
         # cases where a global tag should outrank a local tag but won't,
         # because cachetags does not contain rank info?
         alltags = {}
         _updatetags(cachetags, alltags)
         return alltags

     for head in reversed(heads): # oldest to newest
         assert head in repo.changelog.nodemap, (
             "tag cache returned bogus head %s" % short(head))
     fnodes = _filterfnodes(tagfnode, reversed(heads))
     alltags = _tagsfromfnodes(ui, repo, fnodes)

     # and update the cache (if necessary)
     if shouldwrite:
         _writetagcache(ui, repo, valid, alltags)
     return alltags

 def _filterfnodes(tagfnode, nodes):
     """return a list of unique fnodes

     The order of this list matches the order of "nodes". Preserving this order
     is important as reading tags in different order provides different
     results."""
     seen = set() # set of fnode
     fnodes = []
     for no in nodes: # oldest to newest
         fnode = tagfnode.get(no)
         if fnode and fnode not in seen:
             seen.add(fnode)
             fnodes.append(fnode)
     return fnodes

 def _tagsfromfnodes(ui, repo, fnodes):
     """return a tagsmap from a list of file-node

     tagsmap: tag name to (node, hist) 2-tuples.

     The order of the list matters."""
     alltags = {}
     fctx = None
     for fnode in fnodes:
         if fctx is None:
             fctx = repo.filectx('.hgtags', fileid=fnode)
         else:
             fctx = fctx.filectx(fnode)
         filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
         _updatetags(filetags, alltags)
     return alltags

 def readlocaltags(ui, repo, alltags, tagtypes):
     '''Read local tags in repo. Update alltags and tagtypes.'''
     try:
         data = repo.vfs.read("localtags")
     except IOError as inst:
         if inst.errno != errno.ENOENT:
             raise
         return

     # localtags is in the local encoding; re-encode to UTF-8 on
     # input for consistency with the rest of this module.
     filetags = _readtags(
         ui, repo, data.splitlines(), "localtags",
         recode=encoding.fromlocal)

     # remove tags pointing to invalid nodes
     cl = repo.changelog
     for t in list(filetags):
         try:
             cl.rev(filetags[t][0])
         except (LookupError, ValueError):
             del filetags[t]

     _updatetags(filetags, alltags, 'local', tagtypes)

 def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
     '''Read tag definitions from a file (or any source of lines).

     This function returns two sortdicts with similar information:

     - the first dict, bintaghist, contains the tag information as expected by
       the _readtags function, i.e. a mapping from tag name to (node, hist):
         - node is the node id from the last line read for that name,
         - hist is the list of node ids previously associated with it (in file
           order). All node ids are binary, not hex.

     - the second dict, hextaglines, is a mapping from tag name to a list of
       [hexnode, line number] pairs, ordered from the oldest to the newest node.

     When calcnodelines is False the hextaglines dict is not calculated (an
     empty dict is returned). This is done to improve this function's
     performance in cases where the line numbers are not needed.
     '''

     bintaghist = util.sortdict()
     hextaglines = util.sortdict()
     count = 0

     def dbg(msg):
         ui.debug("%s, line %d: %s\n" % (fn, count, msg))

     for nline, line in enumerate(lines):
         count += 1
         if not line:
             continue
         try:
             (nodehex, name) = line.split(" ", 1)
         except ValueError:
             dbg("cannot parse entry")
             continue
         name = name.strip()
         if recode:
             name = recode(name)
         try:
             nodebin = bin(nodehex)
         except TypeError:
             dbg("node '%s' is not well formed" % nodehex)
             continue

         # update filetags
         if calcnodelines:
             # map tag name to a list of line numbers
             if name not in hextaglines:
                 hextaglines[name] = []
             hextaglines[name].append([nodehex, nline])
             continue
         # map tag name to (node, hist)
         if name not in bintaghist:
             bintaghist[name] = []
         bintaghist[name].append(nodebin)
     return bintaghist, hextaglines

 def _readtags(ui, repo, lines, fn, recode=None, calcnodelines=False):
     '''Read tag definitions from a file (or any source of lines).

     Returns a mapping from tag name to (node, hist).

     "node" is the node id from the last line read for that name. "hist"
     is the list of node ids previously associated with it (in file order).
     All node ids are binary, not hex.
     '''
     filetags, nodelines = _readtaghist(ui, repo, lines, fn, recode=recode,
                                        calcnodelines=calcnodelines)
     # util.sortdict().__setitem__ is much slower at replacing then inserting
     # new entries. The difference can matter if there are thousands of tags.
     # Create a new sortdict to avoid the performance penalty.
     newtags = util.sortdict()
     for tag, taghist in filetags.items():
         newtags[tag] = (taghist[-1], taghist[:-1])
     return newtags

 def _updatetags(filetags, alltags, tagtype=None, tagtypes=None):
     """Incorporate the tag info read from one file into dictionnaries

     The first one, 'alltags', is a "tagmaps" (see 'findglobaltags' for details).

     The second one, 'tagtypes', is optional and will be updated to track the
     "tagtype" of entries in the tagmaps. When set, the 'tagtype' argument also
     needs to be set."""
     if tagtype is None:
         assert tagtypes is None

     for name, nodehist in filetags.iteritems():
         if name not in alltags:
             alltags[name] = nodehist
             if tagtype is not None:
                 tagtypes[name] = tagtype
             continue

         # we prefer alltags[name] if:
         #  it supersedes us OR
         #  mutual supersedes and it has a higher rank
         # otherwise we win because we're tip-most
         anode, ahist = nodehist
         bnode, bhist = alltags[name]
         if (bnode != anode and anode in bhist and
             (bnode not in ahist or len(bhist) > len(ahist))):
             anode = bnode
         elif tagtype is not None:
             tagtypes[name] = tagtype
         ahist.extend([n for n in bhist if n not in ahist])
         alltags[name] = anode, ahist

 def _filename(repo):
     """name of a tagcache file for a given repo or repoview"""
     filename = 'tags2'
     if repo.filtername:
         filename = '%s-%s' % (filename, repo.filtername)
     return filename

 def _readtagcache(ui, repo):
     '''Read the tag cache.

     Returns a tuple (heads, fnodes, validinfo, cachetags, shouldwrite).

     If the cache is completely up-to-date, "cachetags" is a dict of the
     form returned by _readtags() and "heads", "fnodes", and "validinfo" are
     None and "shouldwrite" is False.

     If the cache is not up to date, "cachetags" is None. "heads" is a list
     of all heads currently in the repository, ordered from tip to oldest.
     "validinfo" is a tuple describing cache validation info. This is used
     when writing the tags cache. "fnodes" is a mapping from head to .hgtags
     filenode. "shouldwrite" is True.

     If the cache is not up to date, the caller is responsible for reading tag
     info from each returned head. (See findglobaltags().)
     '''
     try:
         cachefile = repo.cachevfs(_filename(repo), 'r')
         # force reading the file for static-http
         cachelines = iter(cachefile)
     except IOError:
         cachefile = None

     cacherev = None
     cachenode = None
     cachehash = None
     if cachefile:
         try:
             validline = next(cachelines)
             validline = validline.split()
             cacherev = int(validline[0])
             cachenode = bin(validline[1])
             if len(validline) > 2:
                 cachehash = bin(validline[2])
         except Exception:
             # corruption of the cache, just recompute it.
             pass

     tipnode = repo.changelog.tip()
     tiprev = len(repo.changelog) - 1

     # Case 1 (common): tip is the same, so nothing has changed.
     # (Unchanged tip trivially means no changesets have been added.
     # But, thanks to localrepository.destroyed(), it also means none
     # have been destroyed by strip or rollback.)
     if (cacherev == tiprev
             and cachenode == tipnode
             and cachehash == scmutil.filteredhash(repo, tiprev)):
         tags = _readtags(ui, repo, cachelines, cachefile.name)
         cachefile.close()
         return (None, None, None, tags, False)
     if cachefile:
         cachefile.close() # ignore rest of file

     valid = (tiprev, tipnode, scmutil.filteredhash(repo, tiprev))

     repoheads = repo.heads()
     # Case 2 (uncommon): empty repo; get out quickly and don't bother
     # writing an empty cache.
     if repoheads == [nullid]:
         return ([], {}, valid, {}, False)

     # Case 3 (uncommon): cache file missing or empty.

     # Case 4 (uncommon): tip rev decreased. This should only happen
     # when we're called from localrepository.destroyed(). Refresh the
     # cache so future invocations will not see disappeared heads in the
     # cache.

     # Case 5 (common): tip has changed, so we've added/replaced heads.

     # As it happens, the code to handle cases 3, 4, 5 is the same.

     # N.B. in case 4 (nodes destroyed), "new head" really means "newly
     # exposed".
     if not len(repo.file('.hgtags')):
         # No tags have ever been committed, so we can avoid a
         # potentially expensive search.
         return ([], {}, valid, None, True)


     # Now we have to lookup the .hgtags filenode for every new head.
     # This is the most expensive part of finding tags, so performance
     # depends primarily on the size of newheads. Worst case: no cache
     # file, so newheads == repoheads.
     cachefnode = _getfnodes(ui, repo, repoheads)

     # Caller has to iterate over all heads, but can use the filenodes in
     # cachefnode to get to each .hgtags revision quickly.
     return (repoheads, cachefnode, valid, None, True)

 def _getfnodes(ui, repo, nodes):
     """return .hgtags fnodes for a list of changeset nodes

     Return value is a {node: fnode} mapping. There will be no entry for nodes
     without a '.hgtags' file.
     """
     starttime = util.timer()
     fnodescache = hgtagsfnodescache(repo.unfiltered())
     cachefnode = {}
     for node in reversed(nodes):
         fnode = fnodescache.getfnode(node)
         if fnode != nullid:
             cachefnode[node] = fnode

     fnodescache.write()

     duration = util.timer() - starttime
     ui.log('tagscache',
            '%d/%d cache hits/lookups in %0.4f seconds\n',
            fnodescache.hitcount, fnodescache.lookupcount, duration)
     return cachefnode

 def _writetagcache(ui, repo, valid, cachetags):
     filename = _filename(repo)
     try:
         cachefile = repo.cachevfs(filename, 'w', atomictemp=True)
     except (OSError, IOError):
         return

     ui.log('tagscache', 'writing .hg/cache/%s with %d tags\n',
            filename, len(cachetags))

     if valid[2]:
         cachefile.write('%d %s %s\n' % (valid[0], hex(valid[1]), hex(valid[2])))
     else:
         cachefile.write('%d %s\n' % (valid[0], hex(valid[1])))

     # Tag names in the cache are in UTF-8 -- which is the whole reason
     # we keep them in UTF-8 throughout this module. If we converted
     # them local encoding on input, we would lose info writing them to
     # the cache.
     for (name, (node, hist)) in sorted(cachetags.iteritems()):
         for n in hist:
             cachefile.write("%s %s\n" % (hex(n), name))
         cachefile.write("%s %s\n" % (hex(node), name))

     try:
         cachefile.close()
     except (OSError, IOError):
         pass

 def tag(repo, names, node, message, local, user, date, editor=False):
     '''tag a revision with one or more symbolic names.

     names is a list of strings or, when adding a single tag, names may be a
     string.

     if local is True, the tags are stored in a per-repository file.
     otherwise, they are stored in the .hgtags file, and a new
     changeset is committed with the change.

     keyword arguments:

     local: whether to store tags in non-version-controlled file
     (default False)

     message: commit message to use if committing

     user: name of user to use if committing

     date: date tuple to use if committing'''

     if not local:
         m = matchmod.exact(['.hgtags'])
         if any(repo.status(match=m, unknown=True, ignored=True)):
             raise error.Abort(_('working copy of .hgtags is changed'),
                               hint=_('please commit .hgtags manually'))

     with repo.wlock():
         repo.tags() # instantiate the cache
         _tag(repo, names, node, message, local, user, date,
              editor=editor)

 def _tag(repo, names, node, message, local, user, date, extra=None,
          editor=False):
     if isinstance(names, bytes):
         names = (names,)

     branches = repo.branchmap()
     for name in names:
         repo.hook('pretag', throw=True, node=hex(node), tag=name,
                   local=local)
         if name in branches:
             repo.ui.warn(_("warning: tag %s conflicts with existing"
                            " branch name\n") % name)

     def writetags(fp, names, munge, prevtags):
         fp.seek(0, 2)
         if prevtags and not prevtags.endswith('\n'):
             fp.write('\n')
         for name in names:
             if munge:
                 m = munge(name)
             else:
                 m = name

             if (repo._tagscache.tagtypes and
                 name in repo._tagscache.tagtypes):
                 old = repo.tags().get(name, nullid)
                 fp.write('%s %s\n' % (hex(old), m))
             fp.write('%s %s\n' % (hex(node), m))
         fp.close()

     prevtags = ''
     if local:
         try:
             fp = repo.vfs('localtags', 'r+')
         except IOError:
             fp = repo.vfs('localtags', 'a')
         else:
             prevtags = fp.read()

         # local tags are stored in the current charset
         writetags(fp, names, None, prevtags)
         for name in names:
             repo.hook('tag', node=hex(node), tag=name, local=local)
         return

     try:
         fp = repo.wvfs('.hgtags', 'rb+')
     except IOError as e:
         if e.errno != errno.ENOENT:
             raise
         fp = repo.wvfs('.hgtags', 'ab')
     else:
         prevtags = fp.read()

     # committed tags are stored in UTF-8
     writetags(fp, names, encoding.fromlocal, prevtags)

     fp.close()

     repo.invalidatecaches()

     if '.hgtags' not in repo.dirstate:
         repo[None].add(['.hgtags'])

     m = matchmod.exact(['.hgtags'])
     tagnode = repo.commit(message, user, date, extra=extra, match=m,
                           editor=editor)

     for name in names:
         repo.hook('tag', node=hex(node), tag=name, local=local)

     return tagnode

 _fnodescachefile = 'hgtagsfnodes1'
 _fnodesrecsize = 4 + 20 # changeset fragment + filenode
 _fnodesmissingrec = '\xff' * 24

 class hgtagsfnodescache(object):
     """Persistent cache mapping revisions to .hgtags filenodes.

     The cache is an array of records. Each item in the array corresponds to
     a changelog revision. Values in the array contain the first 4 bytes of
     the node hash and the 20 bytes .hgtags filenode for that revision.

     The first 4 bytes are present as a form of verification. Repository
     stripping and rewriting may change the node at a numeric revision in the
     changelog. The changeset fragment serves as a verifier to detect
     rewriting. This logic is shared with the rev branch cache (see
     branchmap.py).

     The instance holds in memory the full cache content but entries are
     only parsed on read.

     Instances behave like lists. ``c[i]`` works where i is a rev or
     changeset node. Missing indexes are populated automatically on access.
     """
     def __init__(self, repo):
         assert repo.filtername is None

         self._repo = repo

         # Only for reporting purposes.
         self.lookupcount = 0
         self.hitcount = 0


         try:
             data = repo.cachevfs.read(_fnodescachefile)
         except (OSError, IOError):
             data = ""
         self._raw = bytearray(data)

         # The end state of self._raw is an array that is of the exact length
         # required to hold a record for every revision in the repository.
         # We truncate or extend the array as necessary. self._dirtyoffset is
         # defined to be the start offset at which we need to write the output
         # file. This offset is also adjusted when new entries are calculated
         # for array members.
         cllen = len(repo.changelog)
         wantedlen = cllen * _fnodesrecsize
         rawlen = len(self._raw)

         self._dirtyoffset = None

         if rawlen < wantedlen:
             self._dirtyoffset = rawlen
             self._raw.extend('\xff' * (wantedlen - rawlen))
         elif rawlen > wantedlen:
             # There's no easy way to truncate array instances. This seems
             # slightly less evil than copying a potentially large array slice.
             for i in range(rawlen - wantedlen):
                 self._raw.pop()
             self._dirtyoffset = len(self._raw)

     def getfnode(self, node, computemissing=True):
         """Obtain the filenode of the .hgtags file at a specified revision.

         If the value is in the cache, the entry will be validated and returned.
         Otherwise, the filenode will be computed and returned unless
         "computemissing" is False, in which case None will be returned without
         any potentially expensive computation being performed.

         If an .hgtags does not exist at the specified revision, nullid is
         returned.
         """
+        if node == nullid:
+            return nullid
+
         ctx = self._repo[node]
         rev = ctx.rev()

         self.lookupcount += 1

         offset = rev * _fnodesrecsize
         record = '%s' % self._raw[offset:offset + _fnodesrecsize]
         properprefix = node[0:4]

         # Validate and return existing entry.
         if record != _fnodesmissingrec:
             fileprefix = record[0:4]

             if fileprefix == properprefix:
                 self.hitcount += 1
                 return record[4:]

             # Fall through.

         # If we get here, the entry is either missing or invalid.

         if not computemissing:
             return None

         # Populate missing entry.
         try:
             fnode = ctx.filenode('.hgtags')
         except error.LookupError:
             # No .hgtags file on this revision.
             fnode = nullid

         self._writeentry(offset, properprefix, fnode)
         return fnode

     def setfnode(self, node, fnode):
         """Set the .hgtags filenode for a given changeset."""
         assert len(fnode) == 20
         ctx = self._repo[node]

         # Do a lookup first to avoid writing if nothing has changed.
         if self.getfnode(ctx.node(), computemissing=False) == fnode:
             return

         self._writeentry(ctx.rev() * _fnodesrecsize, node[0:4], fnode)

     def _writeentry(self, offset, prefix, fnode):
         # Slices on array instances only accept other array.
         entry = bytearray(prefix + fnode)
         self._raw[offset:offset + _fnodesrecsize] = entry
         # self._dirtyoffset could be None.
         self._dirtyoffset = min(self._dirtyoffset or 0, offset or 0)

     def write(self):
         """Perform all necessary writes to cache file.

         This may no-op if no writes are needed or if a write lock could
         not be obtained.
         """
         if self._dirtyoffset is None:
             return

         data = self._raw[self._dirtyoffset:]
         if not data:
             return

         repo = self._repo

         try:
             lock = repo.wlock(wait=False)
         except error.LockError:
             repo.ui.log('tagscache', 'not writing .hg/cache/%s because '
                         'lock cannot be acquired\n' % (_fnodescachefile))
             return

         try:
             f = repo.cachevfs.open(_fnodescachefile, 'ab')
             try:
                 # if the file has been truncated
                 actualoffset = f.tell()
                 if actualoffset < self._dirtyoffset:
                     self._dirtyoffset = actualoffset
                     data = self._raw[self._dirtyoffset:]
                 f.seek(self._dirtyoffset)
                 f.truncate()
                 repo.ui.log('tagscache',
                             'writing %d bytes to cache/%s\n' % (
                             len(data), _fnodescachefile))
                 f.write(data)
                 self._dirtyoffset = None
             finally:
                 f.close()
         except (IOError, OSError) as inst:
             repo.ui.log('tagscache',
                         "couldn't write cache/%s: %s\n" % (
                         _fnodescachefile, stringutil.forcebytestr(inst)))
         finally:
             lock.release()
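For reference, the fixed-size record layout that hgtagsfnodescache persists in .hg/cache/hgtagsfnodes1 (described in the class docstring above) can be illustrated with a small, self-contained sketch. The constants mirror the module; the sample hashes are made up purely for illustration:

```python
_fnodesrecsize = 4 + 20           # 4-byte changeset-node prefix + 20-byte .hgtags filenode
_fnodesmissingrec = b'\xff' * 24  # slot content meaning "not computed yet"

def record_offset(rev):
    # Each changelog revision owns one fixed-size slot in the cache file.
    return rev * _fnodesrecsize

def pack_record(node, fnode):
    # The 4-byte prefix lets readers detect strips/rewrites: if the stored
    # prefix no longer matches the changeset node at that revision, the
    # entry is treated as invalid and recomputed.
    assert len(node) == 20 and len(fnode) == 20
    return node[:4] + fnode

# Hypothetical 20-byte hashes, not real changeset or filenode values.
node = bytes(bytearray(range(20)))
fnode = bytes(bytearray(range(20, 40)))
record = pack_record(node, fnode)
assert len(record) == _fnodesrecsize
assert record_offset(3) == 3 * 24
```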