##// END OF EJS Templates
index: use `index.has_node` in `tags.findglobaltags`...
marmoute -
r43849:103b4430 default draft
parent child Browse files
Show More
@@ -1,861 +1,861 b''
1 # tags.py - read tag info from local repository
1 # tags.py - read tag info from local repository
2 #
2 #
3 # Copyright 2009 Matt Mackall <mpm@selenic.com>
3 # Copyright 2009 Matt Mackall <mpm@selenic.com>
4 # Copyright 2009 Greg Ward <greg@gerg.ca>
4 # Copyright 2009 Greg Ward <greg@gerg.ca>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 # Currently this module only deals with reading and caching tags.
9 # Currently this module only deals with reading and caching tags.
10 # Eventually, it could take care of updating (adding/removing/moving)
10 # Eventually, it could take care of updating (adding/removing/moving)
11 # tags too.
11 # tags too.
12
12
13 from __future__ import absolute_import
13 from __future__ import absolute_import
14
14
15 import errno
15 import errno
16 import io
16 import io
17
17
18 from .node import (
18 from .node import (
19 bin,
19 bin,
20 hex,
20 hex,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 )
24 )
25 from .i18n import _
25 from .i18n import _
26 from . import (
26 from . import (
27 encoding,
27 encoding,
28 error,
28 error,
29 match as matchmod,
29 match as matchmod,
30 pycompat,
30 pycompat,
31 scmutil,
31 scmutil,
32 util,
32 util,
33 )
33 )
34 from .utils import stringutil
34 from .utils import stringutil
35
35
36 # Tags computation can be expensive and caches exist to make it fast in
36 # Tags computation can be expensive and caches exist to make it fast in
37 # the common case.
37 # the common case.
38 #
38 #
39 # The "hgtagsfnodes1" cache file caches the .hgtags filenode values for
39 # The "hgtagsfnodes1" cache file caches the .hgtags filenode values for
40 # each revision in the repository. The file is effectively an array of
40 # each revision in the repository. The file is effectively an array of
41 # fixed length records. Read the docs for "hgtagsfnodescache" for technical
41 # fixed length records. Read the docs for "hgtagsfnodescache" for technical
42 # details.
42 # details.
43 #
43 #
44 # The .hgtags filenode cache grows in proportion to the length of the
44 # The .hgtags filenode cache grows in proportion to the length of the
45 # changelog. The file is truncated when the # changelog is stripped.
45 # changelog. The file is truncated when the # changelog is stripped.
46 #
46 #
47 # The purpose of the filenode cache is to avoid the most expensive part
47 # The purpose of the filenode cache is to avoid the most expensive part
48 # of finding global tags, which is looking up the .hgtags filenode in the
48 # of finding global tags, which is looking up the .hgtags filenode in the
49 # manifest for each head. This can take dozens or over 100ms for
49 # manifest for each head. This can take dozens or over 100ms for
50 # repositories with very large manifests. Multiplied by dozens or even
50 # repositories with very large manifests. Multiplied by dozens or even
51 # hundreds of heads and there is a significant performance concern.
51 # hundreds of heads and there is a significant performance concern.
52 #
52 #
53 # There also exist a separate cache file for each repository filter.
53 # There also exist a separate cache file for each repository filter.
54 # These "tags-*" files store information about the history of tags.
54 # These "tags-*" files store information about the history of tags.
55 #
55 #
56 # The tags cache files consists of a cache validation line followed by
56 # The tags cache files consists of a cache validation line followed by
57 # a history of tags.
57 # a history of tags.
58 #
58 #
59 # The cache validation line has the format:
59 # The cache validation line has the format:
60 #
60 #
61 # <tiprev> <tipnode> [<filteredhash>]
61 # <tiprev> <tipnode> [<filteredhash>]
62 #
62 #
63 # <tiprev> is an integer revision and <tipnode> is a 40 character hex
63 # <tiprev> is an integer revision and <tipnode> is a 40 character hex
64 # node for that changeset. These redundantly identify the repository
64 # node for that changeset. These redundantly identify the repository
65 # tip from the time the cache was written. In addition, <filteredhash>,
65 # tip from the time the cache was written. In addition, <filteredhash>,
66 # if present, is a 40 character hex hash of the contents of the filtered
66 # if present, is a 40 character hex hash of the contents of the filtered
67 # revisions for this filter. If the set of filtered revs changes, the
67 # revisions for this filter. If the set of filtered revs changes, the
68 # hash will change and invalidate the cache.
68 # hash will change and invalidate the cache.
69 #
69 #
70 # The history part of the tags cache consists of lines of the form:
70 # The history part of the tags cache consists of lines of the form:
71 #
71 #
72 # <node> <tag>
72 # <node> <tag>
73 #
73 #
74 # (This format is identical to that of .hgtags files.)
74 # (This format is identical to that of .hgtags files.)
75 #
75 #
76 # <tag> is the tag name and <node> is the 40 character hex changeset
76 # <tag> is the tag name and <node> is the 40 character hex changeset
77 # the tag is associated with.
77 # the tag is associated with.
78 #
78 #
79 # Tags are written sorted by tag name.
79 # Tags are written sorted by tag name.
80 #
80 #
81 # Tags associated with multiple changesets have an entry for each changeset.
81 # Tags associated with multiple changesets have an entry for each changeset.
82 # The most recent changeset (in terms of revlog ordering for the head
82 # The most recent changeset (in terms of revlog ordering for the head
83 # setting it) for each tag is last.
83 # setting it) for each tag is last.
84
84
85
85
86 def fnoderevs(ui, repo, revs):
86 def fnoderevs(ui, repo, revs):
87 """return the list of '.hgtags' fnodes used in a set revisions
87 """return the list of '.hgtags' fnodes used in a set revisions
88
88
89 This is returned as list of unique fnodes. We use a list instead of a set
89 This is returned as list of unique fnodes. We use a list instead of a set
90 because order matters when it comes to tags."""
90 because order matters when it comes to tags."""
91 unfi = repo.unfiltered()
91 unfi = repo.unfiltered()
92 tonode = unfi.changelog.node
92 tonode = unfi.changelog.node
93 nodes = [tonode(r) for r in revs]
93 nodes = [tonode(r) for r in revs]
94 fnodes = _getfnodes(ui, repo, nodes)
94 fnodes = _getfnodes(ui, repo, nodes)
95 fnodes = _filterfnodes(fnodes, nodes)
95 fnodes = _filterfnodes(fnodes, nodes)
96 return fnodes
96 return fnodes
97
97
98
98
99 def _nulltonone(value):
99 def _nulltonone(value):
100 """convert nullid to None
100 """convert nullid to None
101
101
102 For tag value, nullid means "deleted". This small utility function helps
102 For tag value, nullid means "deleted". This small utility function helps
103 translating that to None."""
103 translating that to None."""
104 if value == nullid:
104 if value == nullid:
105 return None
105 return None
106 return value
106 return value
107
107
108
108
109 def difftags(ui, repo, oldfnodes, newfnodes):
109 def difftags(ui, repo, oldfnodes, newfnodes):
110 """list differences between tags expressed in two set of file-nodes
110 """list differences between tags expressed in two set of file-nodes
111
111
112 The list contains entries in the form: (tagname, oldvalue, new value).
112 The list contains entries in the form: (tagname, oldvalue, new value).
113 None is used to expressed missing value:
113 None is used to expressed missing value:
114 ('foo', None, 'abcd') is a new tag,
114 ('foo', None, 'abcd') is a new tag,
115 ('bar', 'ef01', None) is a deletion,
115 ('bar', 'ef01', None) is a deletion,
116 ('baz', 'abcd', 'ef01') is a tag movement.
116 ('baz', 'abcd', 'ef01') is a tag movement.
117 """
117 """
118 if oldfnodes == newfnodes:
118 if oldfnodes == newfnodes:
119 return []
119 return []
120 oldtags = _tagsfromfnodes(ui, repo, oldfnodes)
120 oldtags = _tagsfromfnodes(ui, repo, oldfnodes)
121 newtags = _tagsfromfnodes(ui, repo, newfnodes)
121 newtags = _tagsfromfnodes(ui, repo, newfnodes)
122
122
123 # list of (tag, old, new): None means missing
123 # list of (tag, old, new): None means missing
124 entries = []
124 entries = []
125 for tag, (new, __) in newtags.items():
125 for tag, (new, __) in newtags.items():
126 new = _nulltonone(new)
126 new = _nulltonone(new)
127 old, __ = oldtags.pop(tag, (None, None))
127 old, __ = oldtags.pop(tag, (None, None))
128 old = _nulltonone(old)
128 old = _nulltonone(old)
129 if old != new:
129 if old != new:
130 entries.append((tag, old, new))
130 entries.append((tag, old, new))
131 # handle deleted tags
131 # handle deleted tags
132 for tag, (old, __) in oldtags.items():
132 for tag, (old, __) in oldtags.items():
133 old = _nulltonone(old)
133 old = _nulltonone(old)
134 if old is not None:
134 if old is not None:
135 entries.append((tag, old, None))
135 entries.append((tag, old, None))
136 entries.sort()
136 entries.sort()
137 return entries
137 return entries
138
138
139
139
140 def writediff(fp, difflist):
140 def writediff(fp, difflist):
141 """write tags diff information to a file.
141 """write tags diff information to a file.
142
142
143 Data are stored with a line based format:
143 Data are stored with a line based format:
144
144
145 <action> <hex-node> <tag-name>\n
145 <action> <hex-node> <tag-name>\n
146
146
147 Action are defined as follow:
147 Action are defined as follow:
148 -R tag is removed,
148 -R tag is removed,
149 +A tag is added,
149 +A tag is added,
150 -M tag is moved (old value),
150 -M tag is moved (old value),
151 +M tag is moved (new value),
151 +M tag is moved (new value),
152
152
153 Example:
153 Example:
154
154
155 +A 875517b4806a848f942811a315a5bce30804ae85 t5
155 +A 875517b4806a848f942811a315a5bce30804ae85 t5
156
156
157 See documentation of difftags output for details about the input.
157 See documentation of difftags output for details about the input.
158 """
158 """
159 add = b'+A %s %s\n'
159 add = b'+A %s %s\n'
160 remove = b'-R %s %s\n'
160 remove = b'-R %s %s\n'
161 updateold = b'-M %s %s\n'
161 updateold = b'-M %s %s\n'
162 updatenew = b'+M %s %s\n'
162 updatenew = b'+M %s %s\n'
163 for tag, old, new in difflist:
163 for tag, old, new in difflist:
164 # translate to hex
164 # translate to hex
165 if old is not None:
165 if old is not None:
166 old = hex(old)
166 old = hex(old)
167 if new is not None:
167 if new is not None:
168 new = hex(new)
168 new = hex(new)
169 # write to file
169 # write to file
170 if old is None:
170 if old is None:
171 fp.write(add % (new, tag))
171 fp.write(add % (new, tag))
172 elif new is None:
172 elif new is None:
173 fp.write(remove % (old, tag))
173 fp.write(remove % (old, tag))
174 else:
174 else:
175 fp.write(updateold % (old, tag))
175 fp.write(updateold % (old, tag))
176 fp.write(updatenew % (new, tag))
176 fp.write(updatenew % (new, tag))
177
177
178
178
179 def findglobaltags(ui, repo):
179 def findglobaltags(ui, repo):
180 '''Find global tags in a repo: return a tagsmap
180 '''Find global tags in a repo: return a tagsmap
181
181
182 tagsmap: tag name to (node, hist) 2-tuples.
182 tagsmap: tag name to (node, hist) 2-tuples.
183
183
184 The tags cache is read and updated as a side-effect of calling.
184 The tags cache is read and updated as a side-effect of calling.
185 '''
185 '''
186 (heads, tagfnode, valid, cachetags, shouldwrite) = _readtagcache(ui, repo)
186 (heads, tagfnode, valid, cachetags, shouldwrite) = _readtagcache(ui, repo)
187 if cachetags is not None:
187 if cachetags is not None:
188 assert not shouldwrite
188 assert not shouldwrite
189 # XXX is this really 100% correct? are there oddball special
189 # XXX is this really 100% correct? are there oddball special
190 # cases where a global tag should outrank a local tag but won't,
190 # cases where a global tag should outrank a local tag but won't,
191 # because cachetags does not contain rank info?
191 # because cachetags does not contain rank info?
192 alltags = {}
192 alltags = {}
193 _updatetags(cachetags, alltags)
193 _updatetags(cachetags, alltags)
194 return alltags
194 return alltags
195
195
196 for head in reversed(heads): # oldest to newest
196 for head in reversed(heads): # oldest to newest
197 assert (
197 assert repo.changelog.index.has_node(
198 head in repo.changelog.nodemap
198 head
199 ), b"tag cache returned bogus head %s" % short(head)
199 ), b"tag cache returned bogus head %s" % short(head)
200 fnodes = _filterfnodes(tagfnode, reversed(heads))
200 fnodes = _filterfnodes(tagfnode, reversed(heads))
201 alltags = _tagsfromfnodes(ui, repo, fnodes)
201 alltags = _tagsfromfnodes(ui, repo, fnodes)
202
202
203 # and update the cache (if necessary)
203 # and update the cache (if necessary)
204 if shouldwrite:
204 if shouldwrite:
205 _writetagcache(ui, repo, valid, alltags)
205 _writetagcache(ui, repo, valid, alltags)
206 return alltags
206 return alltags
207
207
208
208
209 def _filterfnodes(tagfnode, nodes):
209 def _filterfnodes(tagfnode, nodes):
210 """return a list of unique fnodes
210 """return a list of unique fnodes
211
211
212 The order of this list matches the order of "nodes". Preserving this order
212 The order of this list matches the order of "nodes". Preserving this order
213 is important as reading tags in different order provides different
213 is important as reading tags in different order provides different
214 results."""
214 results."""
215 seen = set() # set of fnode
215 seen = set() # set of fnode
216 fnodes = []
216 fnodes = []
217 for no in nodes: # oldest to newest
217 for no in nodes: # oldest to newest
218 fnode = tagfnode.get(no)
218 fnode = tagfnode.get(no)
219 if fnode and fnode not in seen:
219 if fnode and fnode not in seen:
220 seen.add(fnode)
220 seen.add(fnode)
221 fnodes.append(fnode)
221 fnodes.append(fnode)
222 return fnodes
222 return fnodes
223
223
224
224
225 def _tagsfromfnodes(ui, repo, fnodes):
225 def _tagsfromfnodes(ui, repo, fnodes):
226 """return a tagsmap from a list of file-node
226 """return a tagsmap from a list of file-node
227
227
228 tagsmap: tag name to (node, hist) 2-tuples.
228 tagsmap: tag name to (node, hist) 2-tuples.
229
229
230 The order of the list matters."""
230 The order of the list matters."""
231 alltags = {}
231 alltags = {}
232 fctx = None
232 fctx = None
233 for fnode in fnodes:
233 for fnode in fnodes:
234 if fctx is None:
234 if fctx is None:
235 fctx = repo.filectx(b'.hgtags', fileid=fnode)
235 fctx = repo.filectx(b'.hgtags', fileid=fnode)
236 else:
236 else:
237 fctx = fctx.filectx(fnode)
237 fctx = fctx.filectx(fnode)
238 filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
238 filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
239 _updatetags(filetags, alltags)
239 _updatetags(filetags, alltags)
240 return alltags
240 return alltags
241
241
242
242
243 def readlocaltags(ui, repo, alltags, tagtypes):
243 def readlocaltags(ui, repo, alltags, tagtypes):
244 '''Read local tags in repo. Update alltags and tagtypes.'''
244 '''Read local tags in repo. Update alltags and tagtypes.'''
245 try:
245 try:
246 data = repo.vfs.read(b"localtags")
246 data = repo.vfs.read(b"localtags")
247 except IOError as inst:
247 except IOError as inst:
248 if inst.errno != errno.ENOENT:
248 if inst.errno != errno.ENOENT:
249 raise
249 raise
250 return
250 return
251
251
252 # localtags is in the local encoding; re-encode to UTF-8 on
252 # localtags is in the local encoding; re-encode to UTF-8 on
253 # input for consistency with the rest of this module.
253 # input for consistency with the rest of this module.
254 filetags = _readtags(
254 filetags = _readtags(
255 ui, repo, data.splitlines(), b"localtags", recode=encoding.fromlocal
255 ui, repo, data.splitlines(), b"localtags", recode=encoding.fromlocal
256 )
256 )
257
257
258 # remove tags pointing to invalid nodes
258 # remove tags pointing to invalid nodes
259 cl = repo.changelog
259 cl = repo.changelog
260 for t in list(filetags):
260 for t in list(filetags):
261 try:
261 try:
262 cl.rev(filetags[t][0])
262 cl.rev(filetags[t][0])
263 except (LookupError, ValueError):
263 except (LookupError, ValueError):
264 del filetags[t]
264 del filetags[t]
265
265
266 _updatetags(filetags, alltags, b'local', tagtypes)
266 _updatetags(filetags, alltags, b'local', tagtypes)
267
267
268
268
269 def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
269 def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
270 '''Read tag definitions from a file (or any source of lines).
270 '''Read tag definitions from a file (or any source of lines).
271
271
272 This function returns two sortdicts with similar information:
272 This function returns two sortdicts with similar information:
273
273
274 - the first dict, bintaghist, contains the tag information as expected by
274 - the first dict, bintaghist, contains the tag information as expected by
275 the _readtags function, i.e. a mapping from tag name to (node, hist):
275 the _readtags function, i.e. a mapping from tag name to (node, hist):
276 - node is the node id from the last line read for that name,
276 - node is the node id from the last line read for that name,
277 - hist is the list of node ids previously associated with it (in file
277 - hist is the list of node ids previously associated with it (in file
278 order). All node ids are binary, not hex.
278 order). All node ids are binary, not hex.
279
279
280 - the second dict, hextaglines, is a mapping from tag name to a list of
280 - the second dict, hextaglines, is a mapping from tag name to a list of
281 [hexnode, line number] pairs, ordered from the oldest to the newest node.
281 [hexnode, line number] pairs, ordered from the oldest to the newest node.
282
282
283 When calcnodelines is False the hextaglines dict is not calculated (an
283 When calcnodelines is False the hextaglines dict is not calculated (an
284 empty dict is returned). This is done to improve this function's
284 empty dict is returned). This is done to improve this function's
285 performance in cases where the line numbers are not needed.
285 performance in cases where the line numbers are not needed.
286 '''
286 '''
287
287
288 bintaghist = util.sortdict()
288 bintaghist = util.sortdict()
289 hextaglines = util.sortdict()
289 hextaglines = util.sortdict()
290 count = 0
290 count = 0
291
291
292 def dbg(msg):
292 def dbg(msg):
293 ui.debug(b"%s, line %d: %s\n" % (fn, count, msg))
293 ui.debug(b"%s, line %d: %s\n" % (fn, count, msg))
294
294
295 for nline, line in enumerate(lines):
295 for nline, line in enumerate(lines):
296 count += 1
296 count += 1
297 if not line:
297 if not line:
298 continue
298 continue
299 try:
299 try:
300 (nodehex, name) = line.split(b" ", 1)
300 (nodehex, name) = line.split(b" ", 1)
301 except ValueError:
301 except ValueError:
302 dbg(b"cannot parse entry")
302 dbg(b"cannot parse entry")
303 continue
303 continue
304 name = name.strip()
304 name = name.strip()
305 if recode:
305 if recode:
306 name = recode(name)
306 name = recode(name)
307 try:
307 try:
308 nodebin = bin(nodehex)
308 nodebin = bin(nodehex)
309 except TypeError:
309 except TypeError:
310 dbg(b"node '%s' is not well formed" % nodehex)
310 dbg(b"node '%s' is not well formed" % nodehex)
311 continue
311 continue
312
312
313 # update filetags
313 # update filetags
314 if calcnodelines:
314 if calcnodelines:
315 # map tag name to a list of line numbers
315 # map tag name to a list of line numbers
316 if name not in hextaglines:
316 if name not in hextaglines:
317 hextaglines[name] = []
317 hextaglines[name] = []
318 hextaglines[name].append([nodehex, nline])
318 hextaglines[name].append([nodehex, nline])
319 continue
319 continue
320 # map tag name to (node, hist)
320 # map tag name to (node, hist)
321 if name not in bintaghist:
321 if name not in bintaghist:
322 bintaghist[name] = []
322 bintaghist[name] = []
323 bintaghist[name].append(nodebin)
323 bintaghist[name].append(nodebin)
324 return bintaghist, hextaglines
324 return bintaghist, hextaglines
325
325
326
326
327 def _readtags(ui, repo, lines, fn, recode=None, calcnodelines=False):
327 def _readtags(ui, repo, lines, fn, recode=None, calcnodelines=False):
328 '''Read tag definitions from a file (or any source of lines).
328 '''Read tag definitions from a file (or any source of lines).
329
329
330 Returns a mapping from tag name to (node, hist).
330 Returns a mapping from tag name to (node, hist).
331
331
332 "node" is the node id from the last line read for that name. "hist"
332 "node" is the node id from the last line read for that name. "hist"
333 is the list of node ids previously associated with it (in file order).
333 is the list of node ids previously associated with it (in file order).
334 All node ids are binary, not hex.
334 All node ids are binary, not hex.
335 '''
335 '''
336 filetags, nodelines = _readtaghist(
336 filetags, nodelines = _readtaghist(
337 ui, repo, lines, fn, recode=recode, calcnodelines=calcnodelines
337 ui, repo, lines, fn, recode=recode, calcnodelines=calcnodelines
338 )
338 )
339 # util.sortdict().__setitem__ is much slower at replacing then inserting
339 # util.sortdict().__setitem__ is much slower at replacing then inserting
340 # new entries. The difference can matter if there are thousands of tags.
340 # new entries. The difference can matter if there are thousands of tags.
341 # Create a new sortdict to avoid the performance penalty.
341 # Create a new sortdict to avoid the performance penalty.
342 newtags = util.sortdict()
342 newtags = util.sortdict()
343 for tag, taghist in filetags.items():
343 for tag, taghist in filetags.items():
344 newtags[tag] = (taghist[-1], taghist[:-1])
344 newtags[tag] = (taghist[-1], taghist[:-1])
345 return newtags
345 return newtags
346
346
347
347
348 def _updatetags(filetags, alltags, tagtype=None, tagtypes=None):
348 def _updatetags(filetags, alltags, tagtype=None, tagtypes=None):
349 """Incorporate the tag info read from one file into dictionnaries
349 """Incorporate the tag info read from one file into dictionnaries
350
350
351 The first one, 'alltags', is a "tagmaps" (see 'findglobaltags' for details).
351 The first one, 'alltags', is a "tagmaps" (see 'findglobaltags' for details).
352
352
353 The second one, 'tagtypes', is optional and will be updated to track the
353 The second one, 'tagtypes', is optional and will be updated to track the
354 "tagtype" of entries in the tagmaps. When set, the 'tagtype' argument also
354 "tagtype" of entries in the tagmaps. When set, the 'tagtype' argument also
355 needs to be set."""
355 needs to be set."""
356 if tagtype is None:
356 if tagtype is None:
357 assert tagtypes is None
357 assert tagtypes is None
358
358
359 for name, nodehist in pycompat.iteritems(filetags):
359 for name, nodehist in pycompat.iteritems(filetags):
360 if name not in alltags:
360 if name not in alltags:
361 alltags[name] = nodehist
361 alltags[name] = nodehist
362 if tagtype is not None:
362 if tagtype is not None:
363 tagtypes[name] = tagtype
363 tagtypes[name] = tagtype
364 continue
364 continue
365
365
366 # we prefer alltags[name] if:
366 # we prefer alltags[name] if:
367 # it supersedes us OR
367 # it supersedes us OR
368 # mutual supersedes and it has a higher rank
368 # mutual supersedes and it has a higher rank
369 # otherwise we win because we're tip-most
369 # otherwise we win because we're tip-most
370 anode, ahist = nodehist
370 anode, ahist = nodehist
371 bnode, bhist = alltags[name]
371 bnode, bhist = alltags[name]
372 if (
372 if (
373 bnode != anode
373 bnode != anode
374 and anode in bhist
374 and anode in bhist
375 and (bnode not in ahist or len(bhist) > len(ahist))
375 and (bnode not in ahist or len(bhist) > len(ahist))
376 ):
376 ):
377 anode = bnode
377 anode = bnode
378 elif tagtype is not None:
378 elif tagtype is not None:
379 tagtypes[name] = tagtype
379 tagtypes[name] = tagtype
380 ahist.extend([n for n in bhist if n not in ahist])
380 ahist.extend([n for n in bhist if n not in ahist])
381 alltags[name] = anode, ahist
381 alltags[name] = anode, ahist
382
382
383
383
384 def _filename(repo):
384 def _filename(repo):
385 """name of a tagcache file for a given repo or repoview"""
385 """name of a tagcache file for a given repo or repoview"""
386 filename = b'tags2'
386 filename = b'tags2'
387 if repo.filtername:
387 if repo.filtername:
388 filename = b'%s-%s' % (filename, repo.filtername)
388 filename = b'%s-%s' % (filename, repo.filtername)
389 return filename
389 return filename
390
390
391
391
392 def _readtagcache(ui, repo):
392 def _readtagcache(ui, repo):
393 '''Read the tag cache.
393 '''Read the tag cache.
394
394
395 Returns a tuple (heads, fnodes, validinfo, cachetags, shouldwrite).
395 Returns a tuple (heads, fnodes, validinfo, cachetags, shouldwrite).
396
396
397 If the cache is completely up-to-date, "cachetags" is a dict of the
397 If the cache is completely up-to-date, "cachetags" is a dict of the
398 form returned by _readtags() and "heads", "fnodes", and "validinfo" are
398 form returned by _readtags() and "heads", "fnodes", and "validinfo" are
399 None and "shouldwrite" is False.
399 None and "shouldwrite" is False.
400
400
401 If the cache is not up to date, "cachetags" is None. "heads" is a list
401 If the cache is not up to date, "cachetags" is None. "heads" is a list
402 of all heads currently in the repository, ordered from tip to oldest.
402 of all heads currently in the repository, ordered from tip to oldest.
403 "validinfo" is a tuple describing cache validation info. This is used
403 "validinfo" is a tuple describing cache validation info. This is used
404 when writing the tags cache. "fnodes" is a mapping from head to .hgtags
404 when writing the tags cache. "fnodes" is a mapping from head to .hgtags
405 filenode. "shouldwrite" is True.
405 filenode. "shouldwrite" is True.
406
406
407 If the cache is not up to date, the caller is responsible for reading tag
407 If the cache is not up to date, the caller is responsible for reading tag
408 info from each returned head. (See findglobaltags().)
408 info from each returned head. (See findglobaltags().)
409 '''
409 '''
410 try:
410 try:
411 cachefile = repo.cachevfs(_filename(repo), b'r')
411 cachefile = repo.cachevfs(_filename(repo), b'r')
412 # force reading the file for static-http
412 # force reading the file for static-http
413 cachelines = iter(cachefile)
413 cachelines = iter(cachefile)
414 except IOError:
414 except IOError:
415 cachefile = None
415 cachefile = None
416
416
417 cacherev = None
417 cacherev = None
418 cachenode = None
418 cachenode = None
419 cachehash = None
419 cachehash = None
420 if cachefile:
420 if cachefile:
421 try:
421 try:
422 validline = next(cachelines)
422 validline = next(cachelines)
423 validline = validline.split()
423 validline = validline.split()
424 cacherev = int(validline[0])
424 cacherev = int(validline[0])
425 cachenode = bin(validline[1])
425 cachenode = bin(validline[1])
426 if len(validline) > 2:
426 if len(validline) > 2:
427 cachehash = bin(validline[2])
427 cachehash = bin(validline[2])
428 except Exception:
428 except Exception:
429 # corruption of the cache, just recompute it.
429 # corruption of the cache, just recompute it.
430 pass
430 pass
431
431
432 tipnode = repo.changelog.tip()
432 tipnode = repo.changelog.tip()
433 tiprev = len(repo.changelog) - 1
433 tiprev = len(repo.changelog) - 1
434
434
435 # Case 1 (common): tip is the same, so nothing has changed.
435 # Case 1 (common): tip is the same, so nothing has changed.
436 # (Unchanged tip trivially means no changesets have been added.
436 # (Unchanged tip trivially means no changesets have been added.
437 # But, thanks to localrepository.destroyed(), it also means none
437 # But, thanks to localrepository.destroyed(), it also means none
438 # have been destroyed by strip or rollback.)
438 # have been destroyed by strip or rollback.)
439 if (
439 if (
440 cacherev == tiprev
440 cacherev == tiprev
441 and cachenode == tipnode
441 and cachenode == tipnode
442 and cachehash == scmutil.filteredhash(repo, tiprev)
442 and cachehash == scmutil.filteredhash(repo, tiprev)
443 ):
443 ):
444 tags = _readtags(ui, repo, cachelines, cachefile.name)
444 tags = _readtags(ui, repo, cachelines, cachefile.name)
445 cachefile.close()
445 cachefile.close()
446 return (None, None, None, tags, False)
446 return (None, None, None, tags, False)
447 if cachefile:
447 if cachefile:
448 cachefile.close() # ignore rest of file
448 cachefile.close() # ignore rest of file
449
449
450 valid = (tiprev, tipnode, scmutil.filteredhash(repo, tiprev))
450 valid = (tiprev, tipnode, scmutil.filteredhash(repo, tiprev))
451
451
452 repoheads = repo.heads()
452 repoheads = repo.heads()
453 # Case 2 (uncommon): empty repo; get out quickly and don't bother
453 # Case 2 (uncommon): empty repo; get out quickly and don't bother
454 # writing an empty cache.
454 # writing an empty cache.
455 if repoheads == [nullid]:
455 if repoheads == [nullid]:
456 return ([], {}, valid, {}, False)
456 return ([], {}, valid, {}, False)
457
457
458 # Case 3 (uncommon): cache file missing or empty.
458 # Case 3 (uncommon): cache file missing or empty.
459
459
460 # Case 4 (uncommon): tip rev decreased. This should only happen
460 # Case 4 (uncommon): tip rev decreased. This should only happen
461 # when we're called from localrepository.destroyed(). Refresh the
461 # when we're called from localrepository.destroyed(). Refresh the
462 # cache so future invocations will not see disappeared heads in the
462 # cache so future invocations will not see disappeared heads in the
463 # cache.
463 # cache.
464
464
465 # Case 5 (common): tip has changed, so we've added/replaced heads.
465 # Case 5 (common): tip has changed, so we've added/replaced heads.
466
466
467 # As it happens, the code to handle cases 3, 4, 5 is the same.
467 # As it happens, the code to handle cases 3, 4, 5 is the same.
468
468
469 # N.B. in case 4 (nodes destroyed), "new head" really means "newly
469 # N.B. in case 4 (nodes destroyed), "new head" really means "newly
470 # exposed".
470 # exposed".
471 if not len(repo.file(b'.hgtags')):
471 if not len(repo.file(b'.hgtags')):
472 # No tags have ever been committed, so we can avoid a
472 # No tags have ever been committed, so we can avoid a
473 # potentially expensive search.
473 # potentially expensive search.
474 return ([], {}, valid, None, True)
474 return ([], {}, valid, None, True)
475
475
476 # Now we have to lookup the .hgtags filenode for every new head.
476 # Now we have to lookup the .hgtags filenode for every new head.
477 # This is the most expensive part of finding tags, so performance
477 # This is the most expensive part of finding tags, so performance
478 # depends primarily on the size of newheads. Worst case: no cache
478 # depends primarily on the size of newheads. Worst case: no cache
479 # file, so newheads == repoheads.
479 # file, so newheads == repoheads.
480 # Reversed order helps the cache ('repoheads' is in descending order)
480 # Reversed order helps the cache ('repoheads' is in descending order)
481 cachefnode = _getfnodes(ui, repo, reversed(repoheads))
481 cachefnode = _getfnodes(ui, repo, reversed(repoheads))
482
482
483 # Caller has to iterate over all heads, but can use the filenodes in
483 # Caller has to iterate over all heads, but can use the filenodes in
484 # cachefnode to get to each .hgtags revision quickly.
484 # cachefnode to get to each .hgtags revision quickly.
485 return (repoheads, cachefnode, valid, None, True)
485 return (repoheads, cachefnode, valid, None, True)
486
486
487
487
def _getfnodes(ui, repo, nodes):
    """Map changeset nodes to their '.hgtags' filenodes.

    Returns a {node: fnode} dict. Changesets that have no '.hgtags'
    file get no entry in the result.
    """
    t0 = util.timer()
    cache = hgtagsfnodescache(repo.unfiltered())
    result = {}
    for n in nodes:
        filenode = cache.getfnode(n)
        if filenode != nullid:
            result[n] = filenode

    # Persist any entries computed during the lookups above.
    cache.write()

    elapsed = util.timer() - t0
    ui.log(
        b'tagscache',
        b'%d/%d cache hits/lookups in %0.4f seconds\n',
        cache.hitcount,
        cache.lookupcount,
        elapsed,
    )
    return result
513
513
514
514
def _writetagcache(ui, repo, valid, cachetags):
    """Persist computed tags and their validity header to the tags cache.

    ``valid`` is a (tiprev, tipnode, fnode-hash-or-None) tuple written as
    the header line; ``cachetags`` maps tag name -> (node, history).
    Cache writes are best-effort: any I/O error while opening or closing
    the cache file is silently ignored.
    """
    cachename = _filename(repo)
    try:
        fp = repo.cachevfs(cachename, b'w', atomictemp=True)
    except (OSError, IOError):
        # e.g. read-only repository; the cache is only an optimization
        return

    ui.log(
        b'tagscache',
        b'writing .hg/cache/%s with %d tags\n',
        cachename,
        len(cachetags),
    )

    # Header line: "tiprev tipnode [fnodehash]".
    tiprev, tipnode, fnodehash = valid[0], valid[1], valid[2]
    if fnodehash:
        fp.write(b'%d %s %s\n' % (tiprev, hex(tipnode), hex(fnodehash)))
    else:
        fp.write(b'%d %s\n' % (tiprev, hex(tipnode)))

    # Tag names in the cache are in UTF-8 -- which is the whole reason
    # we keep them in UTF-8 throughout this module. If we converted
    # them to the local encoding on input, we would lose info writing
    # them to the cache.
    for name, (node, hist) in sorted(pycompat.iteritems(cachetags)):
        for histnode in hist:
            fp.write(b"%s %s\n" % (hex(histnode), name))
        fp.write(b"%s %s\n" % (hex(node), name))

    try:
        fp.close()
    except (OSError, IOError):
        pass
549
549
550
550
def tag(repo, names, node, message, local, user, date, editor=False):
    """tag a revision with one or more symbolic names.

    names is a list of strings or, when adding a single tag, names may be a
    string.

    if local is True, the tags are stored in a per-repository file.
    otherwise, they are stored in the .hgtags file, and a new
    changeset is committed with the change.

    keyword arguments:

    local: whether to store tags in non-version-controlled file
    (default False)

    message: commit message to use if committing

    user: name of user to use if committing

    date: date tuple to use if committing
    """
    if not local:
        # Committing to .hgtags requires a clean working copy of that file,
        # otherwise we would fold unrelated pending edits into the tag commit.
        matcher = matchmod.exact([b'.hgtags'])
        dirty = any(repo.status(match=matcher, unknown=True, ignored=True))
        if dirty:
            raise error.Abort(
                _(b'working copy of .hgtags is changed'),
                hint=_(b'please commit .hgtags manually'),
            )

    with repo.wlock():
        repo.tags()  # instantiate the cache
        _tag(repo, names, node, message, local, user, date, editor=editor)
583
583
584
584
def _tag(
    repo, names, node, message, local, user, date, extra=None, editor=False
):
    # Worker for tag(): perform the actual tag write, either to the
    # uncommitted 'localtags' file (local=True, returns None) or to the
    # tracked .hgtags file followed by a commit (returns the new changeset
    # node). Caller is expected to hold the wlock (see tag()).
    if isinstance(names, bytes):
        # Accept a single tag name passed as a plain string.
        names = (names,)

    branches = repo.branchmap()
    for name in names:
        repo.hook(b'pretag', throw=True, node=hex(node), tag=name, local=local)
        if name in branches:
            # Tags shadowing branch names are legal but confusing; warn.
            repo.ui.warn(
                _(b"warning: tag %s conflicts with existing branch name\n")
                % name
            )

    def writetags(fp, names, munge, prevtags):
        # Append one "<hex-node> <name>" line per tag to fp, making sure the
        # previous content ends with a newline first. 'munge' optionally
        # re-encodes the tag name (local charset -> UTF-8 for .hgtags).
        fp.seek(0, io.SEEK_END)
        if prevtags and not prevtags.endswith(b'\n'):
            fp.write(b'\n')
        for name in names:
            if munge:
                m = munge(name)
            else:
                m = name

            if repo._tagscache.tagtypes and name in repo._tagscache.tagtypes:
                # Tag already exists: record its old target first so the
                # tag's history is preserved in the file.
                old = repo.tags().get(name, nullid)
                fp.write(b'%s %s\n' % (hex(old), m))
            fp.write(b'%s %s\n' % (hex(node), m))
        fp.close()

    prevtags = b''
    if local:
        try:
            # Open for append-at-end read/write; fall back to create.
            fp = repo.vfs(b'localtags', b'r+')
        except IOError:
            fp = repo.vfs(b'localtags', b'a')
        else:
            prevtags = fp.read()

        # local tags are stored in the current charset
        writetags(fp, names, None, prevtags)
        for name in names:
            repo.hook(b'tag', node=hex(node), tag=name, local=local)
        return

    try:
        fp = repo.wvfs(b'.hgtags', b'rb+')
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        # No .hgtags yet: create it.
        fp = repo.wvfs(b'.hgtags', b'ab')
    else:
        prevtags = fp.read()

    # committed tags are stored in UTF-8
    writetags(fp, names, encoding.fromlocal, prevtags)

    fp.close()

    repo.invalidatecaches()

    if b'.hgtags' not in repo.dirstate:
        repo[None].add([b'.hgtags'])

    # Commit only the .hgtags change, nothing else from the working copy.
    m = matchmod.exact([b'.hgtags'])
    tagnode = repo.commit(
        message, user, date, extra=extra, match=m, editor=editor
    )

    for name in names:
        repo.hook(b'tag', node=hex(node), tag=name, local=local)

    return tagnode
659
659
660
660
# Name of the persistent .hgtags-filenode cache file under .hg/cache/.
_fnodescachefile = b'hgtagsfnodes1'
# Fixed record size: 4-byte changeset-hash fragment + 20-byte filenode.
_fnodesrecsize = 4 + 20  # changeset fragment + filenode
# A record of all 0xff bytes marks a slot that has not been computed yet.
_fnodesmissingrec = b'\xff' * 24
664
664
665
665
class hgtagsfnodescache(object):
    """Persistent cache mapping revisions to .hgtags filenodes.

    The cache is an array of records. Each item in the array corresponds to
    a changelog revision. Values in the array contain the first 4 bytes of
    the node hash and the 20 bytes .hgtags filenode for that revision.

    The first 4 bytes are present as a form of verification. Repository
    stripping and rewriting may change the node at a numeric revision in the
    changelog. The changeset fragment serves as a verifier to detect
    rewriting. This logic is shared with the rev branch cache (see
    branchmap.py).

    The instance holds in memory the full cache content but entries are
    only parsed on read.

    Instances behave like lists. ``c[i]`` works where i is a rev or
    changeset node. Missing indexes are populated automatically on access.
    """

    def __init__(self, repo):
        # The cache is indexed by unfiltered revision numbers, so it must
        # only ever be constructed for an unfiltered repository.
        assert repo.filtername is None

        self._repo = repo

        # Only for reporting purposes.
        self.lookupcount = 0
        self.hitcount = 0

        try:
            data = repo.cachevfs.read(_fnodescachefile)
        except (OSError, IOError):
            data = b""
        self._raw = bytearray(data)

        # The end state of self._raw is an array that is of the exact length
        # required to hold a record for every revision in the repository.
        # We truncate or extend the array as necessary. self._dirtyoffset is
        # defined to be the start offset at which we need to write the output
        # file. This offset is also adjusted when new entries are calculated
        # for array members.
        cllen = len(repo.changelog)
        wantedlen = cllen * _fnodesrecsize
        rawlen = len(self._raw)

        self._dirtyoffset = None

        if rawlen < wantedlen:
            # Pad missing slots with the "missing record" marker.
            self._dirtyoffset = rawlen
            self._raw.extend(b'\xff' * (wantedlen - rawlen))
        elif rawlen > wantedlen:
            # There's no easy way to truncate array instances. This seems
            # slightly less evil than copying a potentially large array slice.
            for i in range(rawlen - wantedlen):
                self._raw.pop()
            self._dirtyoffset = len(self._raw)

    def getfnode(self, node, computemissing=True):
        """Obtain the filenode of the .hgtags file at a specified revision.

        If the value is in the cache, the entry will be validated and returned.
        Otherwise, the filenode will be computed and returned unless
        "computemissing" is False, in which case None will be returned without
        any potentially expensive computation being performed.

        If an .hgtags does not exist at the specified revision, nullid is
        returned.
        """
        if node == nullid:
            return nullid

        ctx = self._repo[node]
        rev = ctx.rev()

        self.lookupcount += 1

        offset = rev * _fnodesrecsize
        record = b'%s' % self._raw[offset : offset + _fnodesrecsize]
        properprefix = node[0:4]

        # Validate and return existing entry.
        if record != _fnodesmissingrec:
            fileprefix = record[0:4]

            if fileprefix == properprefix:
                self.hitcount += 1
                return record[4:]

            # Fall through: the prefix mismatch means the revision was
            # rewritten (e.g. by strip) and the record is stale.

        # If we get here, the entry is either missing or invalid.

        if not computemissing:
            return None

        fnode = None
        cl = self._repo.changelog
        p1rev, p2rev = cl._uncheckedparentrevs(rev)
        p1node = cl.node(p1rev)
        p1fnode = self.getfnode(p1node, computemissing=False)
        if p2rev != nullrev:
            # There is some no-merge changeset where p1 is null and p2 is set
            # Processing them as merge is just slower, but still gives a good
            # result.
            #
            # BUGFIX: p2node was previously derived from p1rev, which made
            # p2fnode a copy of p1fnode and defeated the comparison below.
            p2node = cl.node(p2rev)
            p2fnode = self.getfnode(p2node, computemissing=False)
            if p1fnode != p2fnode:
                # we cannot rely on readfast because we don't know against what
                # parent the readfast delta is computed
                p1fnode = None
        if p1fnode is not None:
            # Both parents agree: only a change in this revision's manifest
            # can alter the fnode, so the (cheap) delta read is sufficient.
            mctx = ctx.manifestctx()
            fnode = mctx.readfast().get(b'.hgtags')
            if fnode is None:
                fnode = p1fnode
        if fnode is None:
            # Populate missing entry the slow way.
            try:
                fnode = ctx.filenode(b'.hgtags')
            except error.LookupError:
                # No .hgtags file on this revision.
                fnode = nullid

        self._writeentry(offset, properprefix, fnode)
        return fnode

    def setfnode(self, node, fnode):
        """Set the .hgtags filenode for a given changeset."""
        assert len(fnode) == 20
        ctx = self._repo[node]

        # Do a lookup first to avoid writing if nothing has changed.
        if self.getfnode(ctx.node(), computemissing=False) == fnode:
            return

        self._writeentry(ctx.rev() * _fnodesrecsize, node[0:4], fnode)

    def _writeentry(self, offset, prefix, fnode):
        """Store one record at ``offset`` and mark the cache dirty from there.

        ``prefix`` is the 4-byte changeset-hash fragment, ``fnode`` the
        20-byte .hgtags filenode.
        """
        # Slices on array instances only accept other array.
        entry = bytearray(prefix + fnode)
        self._raw[offset : offset + _fnodesrecsize] = entry
        # Track the lowest dirty offset so write() only rewrites the tail.
        # (Previously a clean cache (None) was collapsed to 0, forcing a
        # full-file rewrite for any single new entry.)
        if self._dirtyoffset is None:
            self._dirtyoffset = offset
        else:
            self._dirtyoffset = min(self._dirtyoffset, offset)

    def write(self):
        """Perform all necessary writes to cache file.

        This may no-op if no writes are needed or if a write lock could
        not be obtained.
        """
        if self._dirtyoffset is None:
            return

        data = self._raw[self._dirtyoffset :]
        if not data:
            return

        repo = self._repo

        try:
            # Non-blocking: the cache is an optimization, never worth waiting
            # on the wlock for.
            lock = repo.wlock(wait=False)
        except error.LockError:
            repo.ui.log(
                b'tagscache',
                b'not writing .hg/cache/%s because '
                b'lock cannot be acquired\n' % _fnodescachefile,
            )
            return

        try:
            f = repo.cachevfs.open(_fnodescachefile, b'ab')
            try:
                # if the file has been truncated
                actualoffset = f.tell()
                if actualoffset < self._dirtyoffset:
                    self._dirtyoffset = actualoffset
                    data = self._raw[self._dirtyoffset :]
                f.seek(self._dirtyoffset)
                f.truncate()
                repo.ui.log(
                    b'tagscache',
                    b'writing %d bytes to cache/%s\n'
                    % (len(data), _fnodescachefile),
                )
                f.write(data)
                self._dirtyoffset = None
            finally:
                f.close()
        except (IOError, OSError) as inst:
            # Best-effort: failure to persist the cache is only logged.
            repo.ui.log(
                b'tagscache',
                b"couldn't write cache/%s: %s\n"
                % (_fnodescachefile, stringutil.forcebytestr(inst)),
            )
        finally:
            lock.release()
General Comments 0
You need to be logged in to leave comments. Login now