##// END OF EJS Templates
tags: use field names instead of field numbers on scmutil.status...
Augie Fackler -
r44051:ba5c39b9 default
parent child Browse files
Show More
@@ -1,861 +1,872
1 # tags.py - read tag info from local repository
1 # tags.py - read tag info from local repository
2 #
2 #
3 # Copyright 2009 Matt Mackall <mpm@selenic.com>
3 # Copyright 2009 Matt Mackall <mpm@selenic.com>
4 # Copyright 2009 Greg Ward <greg@gerg.ca>
4 # Copyright 2009 Greg Ward <greg@gerg.ca>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 # Currently this module only deals with reading and caching tags.
9 # Currently this module only deals with reading and caching tags.
10 # Eventually, it could take care of updating (adding/removing/moving)
10 # Eventually, it could take care of updating (adding/removing/moving)
11 # tags too.
11 # tags too.
12
12
13 from __future__ import absolute_import
13 from __future__ import absolute_import
14
14
15 import errno
15 import errno
16 import io
16 import io
17
17
18 from .node import (
18 from .node import (
19 bin,
19 bin,
20 hex,
20 hex,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 )
24 )
25 from .i18n import _
25 from .i18n import _
26 from . import (
26 from . import (
27 encoding,
27 encoding,
28 error,
28 error,
29 match as matchmod,
29 match as matchmod,
30 pycompat,
30 pycompat,
31 scmutil,
31 scmutil,
32 util,
32 util,
33 )
33 )
34 from .utils import stringutil
34 from .utils import stringutil
35
35
36 # Tags computation can be expensive and caches exist to make it fast in
36 # Tags computation can be expensive and caches exist to make it fast in
37 # the common case.
37 # the common case.
38 #
38 #
39 # The "hgtagsfnodes1" cache file caches the .hgtags filenode values for
39 # The "hgtagsfnodes1" cache file caches the .hgtags filenode values for
40 # each revision in the repository. The file is effectively an array of
40 # each revision in the repository. The file is effectively an array of
41 # fixed length records. Read the docs for "hgtagsfnodescache" for technical
41 # fixed length records. Read the docs for "hgtagsfnodescache" for technical
42 # details.
42 # details.
43 #
43 #
44 # The .hgtags filenode cache grows in proportion to the length of the
44 # The .hgtags filenode cache grows in proportion to the length of the
45 # changelog. The file is truncated when the # changelog is stripped.
45 # changelog. The file is truncated when the # changelog is stripped.
46 #
46 #
47 # The purpose of the filenode cache is to avoid the most expensive part
47 # The purpose of the filenode cache is to avoid the most expensive part
48 # of finding global tags, which is looking up the .hgtags filenode in the
48 # of finding global tags, which is looking up the .hgtags filenode in the
49 # manifest for each head. This can take dozens or over 100ms for
49 # manifest for each head. This can take dozens or over 100ms for
50 # repositories with very large manifests. Multiplied by dozens or even
50 # repositories with very large manifests. Multiplied by dozens or even
51 # hundreds of heads and there is a significant performance concern.
51 # hundreds of heads and there is a significant performance concern.
52 #
52 #
53 # There also exist a separate cache file for each repository filter.
53 # There also exist a separate cache file for each repository filter.
54 # These "tags-*" files store information about the history of tags.
54 # These "tags-*" files store information about the history of tags.
55 #
55 #
56 # The tags cache files consists of a cache validation line followed by
56 # The tags cache files consists of a cache validation line followed by
57 # a history of tags.
57 # a history of tags.
58 #
58 #
59 # The cache validation line has the format:
59 # The cache validation line has the format:
60 #
60 #
61 # <tiprev> <tipnode> [<filteredhash>]
61 # <tiprev> <tipnode> [<filteredhash>]
62 #
62 #
63 # <tiprev> is an integer revision and <tipnode> is a 40 character hex
63 # <tiprev> is an integer revision and <tipnode> is a 40 character hex
64 # node for that changeset. These redundantly identify the repository
64 # node for that changeset. These redundantly identify the repository
65 # tip from the time the cache was written. In addition, <filteredhash>,
65 # tip from the time the cache was written. In addition, <filteredhash>,
66 # if present, is a 40 character hex hash of the contents of the filtered
66 # if present, is a 40 character hex hash of the contents of the filtered
67 # revisions for this filter. If the set of filtered revs changes, the
67 # revisions for this filter. If the set of filtered revs changes, the
68 # hash will change and invalidate the cache.
68 # hash will change and invalidate the cache.
69 #
69 #
70 # The history part of the tags cache consists of lines of the form:
70 # The history part of the tags cache consists of lines of the form:
71 #
71 #
72 # <node> <tag>
72 # <node> <tag>
73 #
73 #
74 # (This format is identical to that of .hgtags files.)
74 # (This format is identical to that of .hgtags files.)
75 #
75 #
76 # <tag> is the tag name and <node> is the 40 character hex changeset
76 # <tag> is the tag name and <node> is the 40 character hex changeset
77 # the tag is associated with.
77 # the tag is associated with.
78 #
78 #
79 # Tags are written sorted by tag name.
79 # Tags are written sorted by tag name.
80 #
80 #
81 # Tags associated with multiple changesets have an entry for each changeset.
81 # Tags associated with multiple changesets have an entry for each changeset.
82 # The most recent changeset (in terms of revlog ordering for the head
82 # The most recent changeset (in terms of revlog ordering for the head
83 # setting it) for each tag is last.
83 # setting it) for each tag is last.
84
84
85
85
def fnoderevs(ui, repo, revs):
    """Return the list of '.hgtags' filenodes used by a set of revisions.

    The result is a list of unique fnodes rather than a set because the
    order in which tags are read matters."""
    unfiltered = repo.unfiltered()
    rev2node = unfiltered.changelog.node
    cnodes = [rev2node(rev) for rev in revs]
    return _filterfnodes(_getfnodes(ui, repo, cnodes), cnodes)
97
97
98
98
def _nulltonone(value):
    """Map the nullid sentinel to None.

    For a tag value, nullid means "deleted"; callers prefer that
    expressed as None, which this small helper provides."""
    return None if value == nullid else value
107
107
108
108
def difftags(ui, repo, oldfnodes, newfnodes):
    """List tag differences between two sets of '.hgtags' file-nodes.

    Each returned entry has the form (tagname, oldvalue, newvalue), with
    None expressing a missing value:
      ('foo', None, 'abcd') is a new tag,
      ('bar', 'ef01', None) is a deletion,
      ('baz', 'abcd', 'ef01') is a tag movement.
    """
    if oldfnodes == newfnodes:
        return []
    oldtags = _tagsfromfnodes(ui, repo, oldfnodes)
    newtags = _tagsfromfnodes(ui, repo, newfnodes)

    # collect (tag, old, new) triples; None means missing
    entries = []
    for tag, (newnode, _hist) in newtags.items():
        newnode = _nulltonone(newnode)
        oldnode, _hist = oldtags.pop(tag, (None, None))
        oldnode = _nulltonone(oldnode)
        if oldnode != newnode:
            entries.append((tag, oldnode, newnode))
    # whatever is left in oldtags no longer exists in the new set
    for tag, (oldnode, _hist) in oldtags.items():
        oldnode = _nulltonone(oldnode)
        if oldnode is not None:
            entries.append((tag, oldnode, None))
    entries.sort()
    return entries
138
138
139
139
def writediff(fp, difflist):
    """Serialize a tags diff (as produced by difftags) to *fp*.

    The data is stored with a line based format:

        <action> <hex-node> <tag-name>\n

    where <action> is one of:
      -R tag is removed,
      +A tag is added,
      -M tag is moved (old value),
      +M tag is moved (new value).

    Example:

        +A 875517b4806a848f942811a315a5bce30804ae85 t5

    See documentation of difftags output for details about the input.
    """
    for tag, old, new in difflist:
        # convert binary nodes to hex before writing
        oldhex = hex(old) if old is not None else None
        newhex = hex(new) if new is not None else None
        if oldhex is None:
            fp.write(b'+A %s %s\n' % (newhex, tag))
        elif newhex is None:
            fp.write(b'-R %s %s\n' % (oldhex, tag))
        else:
            # a move is recorded as the old value then the new one
            fp.write(b'-M %s %s\n' % (oldhex, tag))
            fp.write(b'+M %s %s\n' % (newhex, tag))
177
177
178
178
def findglobaltags(ui, repo):
    '''Find global tags in a repo: return a tagsmap

    tagsmap: tag name to (node, hist) 2-tuples.

    The tags cache is read and updated as a side-effect of calling.
    '''
    (heads, tagfnode, valid, cachetags, shouldwrite) = _readtagcache(ui, repo)
    if cachetags is not None:
        # the cache was fully up to date, so nothing needs recomputing
        # or writing back
        assert not shouldwrite
        # XXX is this really 100% correct?  are there oddball special
        # cases where a global tag should outrank a local tag but won't,
        # because cachetags does not contain rank info?
        alltags = {}
        _updatetags(cachetags, alltags)
        return alltags

    # sanity-check the heads returned by the (possibly stale) cache
    for head in reversed(heads):  # oldest to newest
        assert repo.changelog.index.has_node(
            head
        ), b"tag cache returned bogus head %s" % short(head)
    fnodes = _filterfnodes(tagfnode, reversed(heads))
    alltags = _tagsfromfnodes(ui, repo, fnodes)

    # and update the cache (if necessary)
    if shouldwrite:
        _writetagcache(ui, repo, valid, alltags)
    return alltags
207
207
208
208
209 def _filterfnodes(tagfnode, nodes):
209 def _filterfnodes(tagfnode, nodes):
210 """return a list of unique fnodes
210 """return a list of unique fnodes
211
211
212 The order of this list matches the order of "nodes". Preserving this order
212 The order of this list matches the order of "nodes". Preserving this order
213 is important as reading tags in different order provides different
213 is important as reading tags in different order provides different
214 results."""
214 results."""
215 seen = set() # set of fnode
215 seen = set() # set of fnode
216 fnodes = []
216 fnodes = []
217 for no in nodes: # oldest to newest
217 for no in nodes: # oldest to newest
218 fnode = tagfnode.get(no)
218 fnode = tagfnode.get(no)
219 if fnode and fnode not in seen:
219 if fnode and fnode not in seen:
220 seen.add(fnode)
220 seen.add(fnode)
221 fnodes.append(fnode)
221 fnodes.append(fnode)
222 return fnodes
222 return fnodes
223
223
224
224
225 def _tagsfromfnodes(ui, repo, fnodes):
225 def _tagsfromfnodes(ui, repo, fnodes):
226 """return a tagsmap from a list of file-node
226 """return a tagsmap from a list of file-node
227
227
228 tagsmap: tag name to (node, hist) 2-tuples.
228 tagsmap: tag name to (node, hist) 2-tuples.
229
229
230 The order of the list matters."""
230 The order of the list matters."""
231 alltags = {}
231 alltags = {}
232 fctx = None
232 fctx = None
233 for fnode in fnodes:
233 for fnode in fnodes:
234 if fctx is None:
234 if fctx is None:
235 fctx = repo.filectx(b'.hgtags', fileid=fnode)
235 fctx = repo.filectx(b'.hgtags', fileid=fnode)
236 else:
236 else:
237 fctx = fctx.filectx(fnode)
237 fctx = fctx.filectx(fnode)
238 filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
238 filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
239 _updatetags(filetags, alltags)
239 _updatetags(filetags, alltags)
240 return alltags
240 return alltags
241
241
242
242
def readlocaltags(ui, repo, alltags, tagtypes):
    '''Read local tags in repo.  Update alltags and tagtypes.'''
    try:
        data = repo.vfs.read(b"localtags")
    except IOError as inst:
        # a missing localtags file simply means "no local tags"
        if inst.errno == errno.ENOENT:
            return
        raise

    # localtags is in the local encoding; re-encode to UTF-8 on
    # input for consistency with the rest of this module.
    filetags = _readtags(
        ui, repo, data.splitlines(), b"localtags", recode=encoding.fromlocal
    )

    # drop tags whose target node is unknown to the changelog
    cl = repo.changelog
    for name in list(filetags):
        try:
            cl.rev(filetags[name][0])
        except (LookupError, ValueError):
            del filetags[name]

    _updatetags(filetags, alltags, b'local', tagtypes)
267
267
268
268
def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
    '''Read tag definitions from a file (or any source of lines).

    This function returns two sortdicts with similar information:

    - the first dict, bintaghist, contains the tag information as expected by
      the _readtags function, i.e. a mapping from tag name to (node, hist):
        - node is the node id from the last line read for that name,
        - hist is the list of node ids previously associated with it (in file
          order). All node ids are binary, not hex.

    - the second dict, hextaglines, is a mapping from tag name to a list of
      [hexnode, line number] pairs, ordered from the oldest to the newest node.

    When calcnodelines is False the hextaglines dict is not calculated (an
    empty dict is returned). This is done to improve this function's
    performance in cases where the line numbers are not needed.
    '''

    bintaghist = util.sortdict()
    hextaglines = util.sortdict()
    count = 0

    def dbg(msg):
        # debug messages reference the 1-based line number inside *fn*
        ui.debug(b"%s, line %d: %s\n" % (fn, count, msg))

    for nline, line in enumerate(lines):
        count += 1
        if not line:
            continue
        fields = line.split(b" ", 1)
        if len(fields) != 2:
            dbg(b"cannot parse entry")
            continue
        nodehex, name = fields
        name = name.strip()
        if recode:
            name = recode(name)
        try:
            nodebin = bin(nodehex)
        except TypeError:
            dbg(b"node '%s' is not well formed" % nodehex)
            continue

        if calcnodelines:
            # in this mode only track [hexnode, line number] pairs
            if name not in hextaglines:
                hextaglines[name] = []
            hextaglines[name].append([nodehex, nline])
            continue
        # record the node history for this tag, oldest first
        if name not in bintaghist:
            bintaghist[name] = []
        bintaghist[name].append(nodebin)
    return bintaghist, hextaglines
325
325
326
326
def _readtags(ui, repo, lines, fn, recode=None, calcnodelines=False):
    '''Read tag definitions from a file (or any source of lines).

    Returns a mapping from tag name to (node, hist), where "node" is the
    node id from the last line read for that name and "hist" is the list
    of node ids previously associated with it (in file order).  All node
    ids are binary, not hex.
    '''
    filetags, nodelines = _readtaghist(
        ui, repo, lines, fn, recode=recode, calcnodelines=calcnodelines
    )
    # Replacing entries via util.sortdict().__setitem__ is much slower
    # than inserting new ones, which matters with thousands of tags:
    # build a fresh sortdict instead of updating filetags in place.
    newtags = util.sortdict()
    for tag, taghist in filetags.items():
        newtags[tag] = (taghist[-1], taghist[:-1])
    return newtags
346
346
347
347
348 def _updatetags(filetags, alltags, tagtype=None, tagtypes=None):
348 def _updatetags(filetags, alltags, tagtype=None, tagtypes=None):
349 """Incorporate the tag info read from one file into dictionnaries
349 """Incorporate the tag info read from one file into dictionnaries
350
350
351 The first one, 'alltags', is a "tagmaps" (see 'findglobaltags' for details).
351 The first one, 'alltags', is a "tagmaps" (see 'findglobaltags' for details).
352
352
353 The second one, 'tagtypes', is optional and will be updated to track the
353 The second one, 'tagtypes', is optional and will be updated to track the
354 "tagtype" of entries in the tagmaps. When set, the 'tagtype' argument also
354 "tagtype" of entries in the tagmaps. When set, the 'tagtype' argument also
355 needs to be set."""
355 needs to be set."""
356 if tagtype is None:
356 if tagtype is None:
357 assert tagtypes is None
357 assert tagtypes is None
358
358
359 for name, nodehist in pycompat.iteritems(filetags):
359 for name, nodehist in pycompat.iteritems(filetags):
360 if name not in alltags:
360 if name not in alltags:
361 alltags[name] = nodehist
361 alltags[name] = nodehist
362 if tagtype is not None:
362 if tagtype is not None:
363 tagtypes[name] = tagtype
363 tagtypes[name] = tagtype
364 continue
364 continue
365
365
366 # we prefer alltags[name] if:
366 # we prefer alltags[name] if:
367 # it supersedes us OR
367 # it supersedes us OR
368 # mutual supersedes and it has a higher rank
368 # mutual supersedes and it has a higher rank
369 # otherwise we win because we're tip-most
369 # otherwise we win because we're tip-most
370 anode, ahist = nodehist
370 anode, ahist = nodehist
371 bnode, bhist = alltags[name]
371 bnode, bhist = alltags[name]
372 if (
372 if (
373 bnode != anode
373 bnode != anode
374 and anode in bhist
374 and anode in bhist
375 and (bnode not in ahist or len(bhist) > len(ahist))
375 and (bnode not in ahist or len(bhist) > len(ahist))
376 ):
376 ):
377 anode = bnode
377 anode = bnode
378 elif tagtype is not None:
378 elif tagtype is not None:
379 tagtypes[name] = tagtype
379 tagtypes[name] = tagtype
380 ahist.extend([n for n in bhist if n not in ahist])
380 ahist.extend([n for n in bhist if n not in ahist])
381 alltags[name] = anode, ahist
381 alltags[name] = anode, ahist
382
382
383
383
384 def _filename(repo):
384 def _filename(repo):
385 """name of a tagcache file for a given repo or repoview"""
385 """name of a tagcache file for a given repo or repoview"""
386 filename = b'tags2'
386 filename = b'tags2'
387 if repo.filtername:
387 if repo.filtername:
388 filename = b'%s-%s' % (filename, repo.filtername)
388 filename = b'%s-%s' % (filename, repo.filtername)
389 return filename
389 return filename
390
390
391
391
def _readtagcache(ui, repo):
    '''Read the tag cache.

    Returns a tuple (heads, fnodes, validinfo, cachetags, shouldwrite).

    If the cache is completely up-to-date, "cachetags" is a dict of the
    form returned by _readtags() and "heads", "fnodes", and "validinfo" are
    None and "shouldwrite" is False.

    If the cache is not up to date, "cachetags" is None. "heads" is a list
    of all heads currently in the repository, ordered from tip to oldest.
    "validinfo" is a tuple describing cache validation info. This is used
    when writing the tags cache. "fnodes" is a mapping from head to .hgtags
    filenode. "shouldwrite" is True.

    If the cache is not up to date, the caller is responsible for reading tag
    info from each returned head. (See findglobaltags().)
    '''
    try:
        cachefile = repo.cachevfs(_filename(repo), b'r')
        # force reading the file for static-http
        cachelines = iter(cachefile)
    except IOError:
        # missing or unreadable cache file: treat as a cache miss
        cachefile = None

    cacherev = None
    cachenode = None
    cachehash = None
    if cachefile:
        try:
            # cache validation line: <tiprev> <tipnode> [<filteredhash>]
            validline = next(cachelines)
            validline = validline.split()
            cacherev = int(validline[0])
            cachenode = bin(validline[1])
            if len(validline) > 2:
                cachehash = bin(validline[2])
        except Exception:
            # corruption of the cache, just recompute it.
            pass

    tipnode = repo.changelog.tip()
    tiprev = len(repo.changelog) - 1

    # Case 1 (common): tip is the same, so nothing has changed.
    # (Unchanged tip trivially means no changesets have been added.
    # But, thanks to localrepository.destroyed(), it also means none
    # have been destroyed by strip or rollback.)
    if (
        cacherev == tiprev
        and cachenode == tipnode
        and cachehash == scmutil.filteredhash(repo, tiprev)
    ):
        tags = _readtags(ui, repo, cachelines, cachefile.name)
        cachefile.close()
        return (None, None, None, tags, False)
    if cachefile:
        cachefile.close()  # ignore rest of file

    valid = (tiprev, tipnode, scmutil.filteredhash(repo, tiprev))

    repoheads = repo.heads()
    # Case 2 (uncommon): empty repo; get out quickly and don't bother
    # writing an empty cache.
    if repoheads == [nullid]:
        return ([], {}, valid, {}, False)

    # Case 3 (uncommon): cache file missing or empty.

    # Case 4 (uncommon): tip rev decreased. This should only happen
    # when we're called from localrepository.destroyed(). Refresh the
    # cache so future invocations will not see disappeared heads in the
    # cache.

    # Case 5 (common): tip has changed, so we've added/replaced heads.

    # As it happens, the code to handle cases 3, 4, 5 is the same.

    # N.B. in case 4 (nodes destroyed), "new head" really means "newly
    # exposed".
    if not len(repo.file(b'.hgtags')):
        # No tags have ever been committed, so we can avoid a
        # potentially expensive search.
        return ([], {}, valid, None, True)

    # Now we have to lookup the .hgtags filenode for every new head.
    # This is the most expensive part of finding tags, so performance
    # depends primarily on the size of newheads. Worst case: no cache
    # file, so newheads == repoheads.
    # Reversed order helps the cache ('repoheads' is in descending order)
    cachefnode = _getfnodes(ui, repo, reversed(repoheads))

    # Caller has to iterate over all heads, but can use the filenodes in
    # cachefnode to get to each .hgtags revision quickly.
    return (repoheads, cachefnode, valid, None, True)
486
486
487
487
def _getfnodes(ui, repo, nodes):
    """Return the .hgtags fnodes for a list of changeset nodes.

    The result is a {node: fnode} mapping; changesets that have no
    '.hgtags' file are simply absent from the mapping.  Cache
    hit/lookup statistics and the elapsed time are logged under the
    'tagscache' event for performance tracking.
    """
    begin = util.timer()
    cache = hgtagsfnodescache(repo.unfiltered())
    result = {}
    for candidate in nodes:
        fnode = cache.getfnode(candidate)
        # nullid signals "no .hgtags at this revision" -- leave it out.
        if fnode != nullid:
            result[candidate] = fnode

    # Persist any entries computed during the loop above.
    cache.write()

    elapsed = util.timer() - begin
    ui.log(
        b'tagscache',
        b'%d/%d cache hits/lookups in %0.4f seconds\n',
        cache.hitcount,
        cache.lookupcount,
        elapsed,
    )
    return result
513
513
514
514
def _writetagcache(ui, repo, valid, cachetags):
    """Write the computed tags out to the repo's tags cache file.

    ``valid`` is a (tiprev, tipnode, filteredhash) triple recording the
    repository state this cache is valid for; ``cachetags`` maps UTF-8
    tag names to (node, history) pairs.  All I/O failures are swallowed
    silently -- the cache is purely an optimization.
    """
    filename = _filename(repo)
    try:
        cachefile = repo.cachevfs(filename, b'w', atomictemp=True)
    except (OSError, IOError):
        return

    ui.log(
        b'tagscache',
        b'writing .hg/cache/%s with %d tags\n',
        filename,
        len(cachetags),
    )

    # First line identifies the repo state this cache corresponds to;
    # the filtered hash is only recorded when present.
    header = b'%d %s' % (valid[0], hex(valid[1]))
    if valid[2]:
        header = b'%s %s' % (header, hex(valid[2]))
    cachefile.write(header + b'\n')

    # Tag names in the cache are in UTF-8 -- which is the whole reason
    # we keep them in UTF-8 throughout this module. If we converted
    # them local encoding on input, we would lose info writing them to
    # the cache.
    for name, (node, hist) in sorted(pycompat.iteritems(cachetags)):
        for histnode in hist:
            cachefile.write(b"%s %s\n" % (hex(histnode), name))
        cachefile.write(b"%s %s\n" % (hex(node), name))

    try:
        cachefile.close()
    except (OSError, IOError):
        pass
549
549
550
550
def tag(repo, names, node, message, local, user, date, editor=False):
    '''tag a revision with one or more symbolic names.

    names is a list of strings or, when adding a single tag, names may be a
    string.

    if local is True, the tags are stored in a per-repository file.
    otherwise, they are stored in the .hgtags file, and a new
    changeset is committed with the change.

    keyword arguments:

    local: whether to store tags in non-version-controlled file
    (default False)

    message: commit message to use if committing

    user: name of user to use if committing

    date: date tuple to use if committing'''

    if not local:
        # Refuse to proceed if .hgtags is anything but pristine in the
        # working copy: the commit below is restricted to .hgtags, so any
        # pre-existing local edits would be swept into the tag changeset.
        m = matchmod.exact([b'.hgtags'])
        st = repo.status(match=m, unknown=True, ignored=True)
        dirty = (
            st.modified
            or st.added
            or st.removed
            or st.deleted
            or st.unknown
            or st.ignored
            or st.clean
        )
        if dirty:
            raise error.Abort(
                _(b'working copy of .hgtags is changed'),
                hint=_(b'please commit .hgtags manually'),
            )

    with repo.wlock():
        repo.tags()  # instantiate the cache
        _tag(repo, names, node, message, local, user, date, editor=editor)
583
594
584
595
def _tag(
    repo, names, node, message, local, user, date, extra=None, editor=False
):
    """Worker for tag(): record each name in ``names`` as a tag for ``node``.

    Runs the 'pretag' hook per name, appends entries to either the
    uncommitted 'localtags' file (when ``local``) or the tracked
    '.hgtags' file (committing the change), then runs the 'tag' hook
    per name.  Returns the node of the tag commit, or None when
    ``local`` (no commit is made in that case).
    """
    # Callers may pass a single tag name as bytes; normalize to a tuple.
    if isinstance(names, bytes):
        names = (names,)

    branches = repo.branchmap()
    for name in names:
        repo.hook(b'pretag', throw=True, node=hex(node), tag=name, local=local)
        # Tags and branch names share a namespace for lookup purposes;
        # warn (but do not abort) on a collision.
        if name in branches:
            repo.ui.warn(
                _(b"warning: tag %s conflicts with existing branch name\n")
                % name
            )

    def writetags(fp, names, munge, prevtags):
        # Append tag lines ('<hex node> <name>\n') at EOF; munge (if given)
        # converts names to the target encoding.  Closes fp when done.
        fp.seek(0, io.SEEK_END)
        # Make sure we do not glue onto an unterminated final line.
        if prevtags and not prevtags.endswith(b'\n'):
            fp.write(b'\n')
        for name in names:
            if munge:
                m = munge(name)
            else:
                m = name

            if repo._tagscache.tagtypes and name in repo._tagscache.tagtypes:
                # Re-tagging: record the old target first so history shows
                # the tag moving rather than appearing out of nowhere.
                old = repo.tags().get(name, nullid)
                fp.write(b'%s %s\n' % (hex(old), m))
            fp.write(b'%s %s\n' % (hex(node), m))
        fp.close()

    prevtags = b''
    if local:
        try:
            fp = repo.vfs(b'localtags', b'r+')
        except IOError:
            # File does not exist yet; create it in append mode.
            fp = repo.vfs(b'localtags', b'a')
        else:
            prevtags = fp.read()

        # local tags are stored in the current charset
        writetags(fp, names, None, prevtags)
        for name in names:
            repo.hook(b'tag', node=hex(node), tag=name, local=local)
        # Local tags never produce a commit.
        return

    try:
        fp = repo.wvfs(b'.hgtags', b'rb+')
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        # No .hgtags yet; create it in append mode.
        fp = repo.wvfs(b'.hgtags', b'ab')
    else:
        prevtags = fp.read()

    # committed tags are stored in UTF-8
    writetags(fp, names, encoding.fromlocal, prevtags)

    fp.close()

    repo.invalidatecaches()

    # Ensure the (possibly freshly created) .hgtags file is tracked so the
    # commit below picks it up.
    if b'.hgtags' not in repo.dirstate:
        repo[None].add([b'.hgtags'])

    # Commit only the .hgtags change, nothing else from the working copy.
    m = matchmod.exact([b'.hgtags'])
    tagnode = repo.commit(
        message, user, date, extra=extra, match=m, editor=editor
    )

    for name in names:
        repo.hook(b'tag', node=hex(node), tag=name, local=local)

    return tagnode
659
670
660
671
# Name of the cache file (under .hg/cache/) holding per-revision .hgtags
# filenode records; see hgtagsfnodescache below for the record layout.
_fnodescachefile = b'hgtagsfnodes1'
_fnodesrecsize = 4 + 20  # changeset fragment + filenode
# Sentinel record: an entry of all 0xff bytes means "not yet computed".
_fnodesmissingrec = b'\xff' * 24
664
675
665
676
class hgtagsfnodescache(object):
    """Persistent cache mapping revisions to .hgtags filenodes.

    The cache is an array of records. Each item in the array corresponds to
    a changelog revision. Values in the array contain the first 4 bytes of
    the node hash and the 20 bytes .hgtags filenode for that revision.

    The first 4 bytes are present as a form of verification. Repository
    stripping and rewriting may change the node at a numeric revision in the
    changelog. The changeset fragment serves as a verifier to detect
    rewriting. This logic is shared with the rev branch cache (see
    branchmap.py).

    The instance holds in memory the full cache content but entries are
    only parsed on read.

    Instances behave like lists. ``c[i]`` works where i is a rev or
    changeset node. Missing indexes are populated automatically on access.
    """

    def __init__(self, repo):
        # The cache is indexed by unfiltered revision number, so it must be
        # built against an unfiltered repo.
        assert repo.filtername is None

        self._repo = repo

        # Only for reporting purposes.
        self.lookupcount = 0
        self.hitcount = 0

        try:
            data = repo.cachevfs.read(_fnodescachefile)
        except (OSError, IOError):
            data = b""
        self._raw = bytearray(data)

        # The end state of self._raw is an array that is of the exact length
        # required to hold a record for every revision in the repository.
        # We truncate or extend the array as necessary. self._dirtyoffset is
        # defined to be the start offset at which we need to write the output
        # file. This offset is also adjusted when new entries are calculated
        # for array members.
        cllen = len(repo.changelog)
        wantedlen = cllen * _fnodesrecsize
        rawlen = len(self._raw)

        self._dirtyoffset = None

        if rawlen < wantedlen:
            # Pad with "missing" records for revisions not yet cached.
            self._dirtyoffset = rawlen
            self._raw.extend(b'\xff' * (wantedlen - rawlen))
        elif rawlen > wantedlen:
            # There's no easy way to truncate array instances. This seems
            # slightly less evil than copying a potentially large array slice.
            for i in range(rawlen - wantedlen):
                self._raw.pop()
            self._dirtyoffset = len(self._raw)

    def getfnode(self, node, computemissing=True):
        """Obtain the filenode of the .hgtags file at a specified revision.

        If the value is in the cache, the entry will be validated and returned.
        Otherwise, the filenode will be computed and returned unless
        "computemissing" is False, in which case None will be returned without
        any potentially expensive computation being performed.

        If an .hgtags does not exist at the specified revision, nullid is
        returned.
        """
        if node == nullid:
            return nullid

        ctx = self._repo[node]
        rev = ctx.rev()

        self.lookupcount += 1

        offset = rev * _fnodesrecsize
        record = b'%s' % self._raw[offset : offset + _fnodesrecsize]
        properprefix = node[0:4]

        # Validate and return existing entry.
        if record != _fnodesmissingrec:
            fileprefix = record[0:4]

            if fileprefix == properprefix:
                self.hitcount += 1
                return record[4:]

            # Fall through.

        # If we get here, the entry is either missing or invalid.

        if not computemissing:
            return None

        fnode = None
        cl = self._repo.changelog
        p1rev, p2rev = cl._uncheckedparentrevs(rev)
        p1node = cl.node(p1rev)
        p1fnode = self.getfnode(p1node, computemissing=False)
        if p2rev != nullrev:
            # There is some no-merge changeset where p1 is null and p2 is set
            # Processing them as merge is just slower, but still gives a good
            # result.
            # BUG FIX: this previously read cl.node(p1rev), which made
            # p2fnode always equal p1fnode and defeated the merge-safety
            # check below.
            p2node = cl.node(p2rev)
            p2fnode = self.getfnode(p2node, computemissing=False)
            if p1fnode != p2fnode:
                # we cannot rely on readfast because we don't know against what
                # parent the readfast delta is computed
                p1fnode = None
        if p1fnode is not None:
            mctx = ctx.manifestctx()
            fnode = mctx.readfast().get(b'.hgtags')
            if fnode is None:
                fnode = p1fnode
        if fnode is None:
            # Populate missing entry.
            try:
                fnode = ctx.filenode(b'.hgtags')
            except error.LookupError:
                # No .hgtags file on this revision.
                fnode = nullid

        self._writeentry(offset, properprefix, fnode)
        return fnode

    def setfnode(self, node, fnode):
        """Set the .hgtags filenode for a given changeset."""
        assert len(fnode) == 20
        ctx = self._repo[node]

        # Do a lookup first to avoid writing if nothing has changed.
        if self.getfnode(ctx.node(), computemissing=False) == fnode:
            return

        self._writeentry(ctx.rev() * _fnodesrecsize, node[0:4], fnode)

    def _writeentry(self, offset, prefix, fnode):
        # Record one (changeset fragment, fnode) entry in the in-memory
        # array and widen the dirty region so write() flushes it.
        # Slices on array instances only accept other array.
        entry = bytearray(prefix + fnode)
        self._raw[offset : offset + _fnodesrecsize] = entry
        # BUG FIX: previously ``min(self._dirtyoffset or 0, offset or 0)``
        # collapsed a clean cache (None) to offset 0, forcing a full-file
        # rewrite for any single new entry. Only mark from ``offset`` when
        # nothing was dirty before.
        if self._dirtyoffset is None:
            self._dirtyoffset = offset
        else:
            self._dirtyoffset = min(self._dirtyoffset, offset)

    def write(self):
        """Perform all necessary writes to cache file.

        This may no-op if no writes are needed or if a write lock could
        not be obtained.
        """
        if self._dirtyoffset is None:
            return

        data = self._raw[self._dirtyoffset :]
        if not data:
            return

        repo = self._repo

        try:
            lock = repo.wlock(wait=False)
        except error.LockError:
            repo.ui.log(
                b'tagscache',
                b'not writing .hg/cache/%s because '
                b'lock cannot be acquired\n' % _fnodescachefile,
            )
            return

        try:
            f = repo.cachevfs.open(_fnodescachefile, b'ab')
            try:
                # if the file has been truncated
                actualoffset = f.tell()
                if actualoffset < self._dirtyoffset:
                    self._dirtyoffset = actualoffset
                    data = self._raw[self._dirtyoffset :]
                f.seek(self._dirtyoffset)
                f.truncate()
                repo.ui.log(
                    b'tagscache',
                    b'writing %d bytes to cache/%s\n'
                    % (len(data), _fnodescachefile),
                )
                f.write(data)
                self._dirtyoffset = None
            finally:
                f.close()
        except (IOError, OSError) as inst:
            repo.ui.log(
                b'tagscache',
                b"couldn't write cache/%s: %s\n"
                % (_fnodescachefile, stringutil.forcebytestr(inst)),
            )
        finally:
            lock.release()
General Comments 0
You need to be logged in to leave comments. Login now