##// END OF EJS Templates
tags: avoid double-reversing a list...
Martin von Zweigbergk -
r42425:6770df6e default
parent child Browse files
Show More
@@ -1,815 +1,816 b''
1 # tags.py - read tag info from local repository
1 # tags.py - read tag info from local repository
2 #
2 #
3 # Copyright 2009 Matt Mackall <mpm@selenic.com>
3 # Copyright 2009 Matt Mackall <mpm@selenic.com>
4 # Copyright 2009 Greg Ward <greg@gerg.ca>
4 # Copyright 2009 Greg Ward <greg@gerg.ca>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 # Currently this module only deals with reading and caching tags.
9 # Currently this module only deals with reading and caching tags.
10 # Eventually, it could take care of updating (adding/removing/moving)
10 # Eventually, it could take care of updating (adding/removing/moving)
11 # tags too.
11 # tags too.
12
12
13 from __future__ import absolute_import
13 from __future__ import absolute_import
14
14
15 import errno
15 import errno
16
16
17 from .node import (
17 from .node import (
18 bin,
18 bin,
19 hex,
19 hex,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 short,
22 short,
23 )
23 )
24 from .i18n import _
24 from .i18n import _
25 from . import (
25 from . import (
26 encoding,
26 encoding,
27 error,
27 error,
28 match as matchmod,
28 match as matchmod,
29 scmutil,
29 scmutil,
30 util,
30 util,
31 )
31 )
32 from .utils import (
32 from .utils import (
33 stringutil,
33 stringutil,
34 )
34 )
35
35
36 # Tags computation can be expensive and caches exist to make it fast in
36 # Tags computation can be expensive and caches exist to make it fast in
37 # the common case.
37 # the common case.
38 #
38 #
39 # The "hgtagsfnodes1" cache file caches the .hgtags filenode values for
39 # The "hgtagsfnodes1" cache file caches the .hgtags filenode values for
40 # each revision in the repository. The file is effectively an array of
40 # each revision in the repository. The file is effectively an array of
41 # fixed length records. Read the docs for "hgtagsfnodescache" for technical
41 # fixed length records. Read the docs for "hgtagsfnodescache" for technical
42 # details.
42 # details.
43 #
43 #
44 # The .hgtags filenode cache grows in proportion to the length of the
44 # The .hgtags filenode cache grows in proportion to the length of the
45 # changelog. The file is truncated when the # changelog is stripped.
45 # changelog. The file is truncated when the # changelog is stripped.
46 #
46 #
47 # The purpose of the filenode cache is to avoid the most expensive part
47 # The purpose of the filenode cache is to avoid the most expensive part
48 # of finding global tags, which is looking up the .hgtags filenode in the
48 # of finding global tags, which is looking up the .hgtags filenode in the
49 # manifest for each head. This can take dozens or over 100ms for
49 # manifest for each head. This can take dozens or over 100ms for
50 # repositories with very large manifests. Multiplied by dozens or even
50 # repositories with very large manifests. Multiplied by dozens or even
51 # hundreds of heads and there is a significant performance concern.
51 # hundreds of heads and there is a significant performance concern.
52 #
52 #
53 # There also exist a separate cache file for each repository filter.
53 # There also exist a separate cache file for each repository filter.
54 # These "tags-*" files store information about the history of tags.
54 # These "tags-*" files store information about the history of tags.
55 #
55 #
56 # The tags cache files consists of a cache validation line followed by
56 # The tags cache files consists of a cache validation line followed by
57 # a history of tags.
57 # a history of tags.
58 #
58 #
59 # The cache validation line has the format:
59 # The cache validation line has the format:
60 #
60 #
61 # <tiprev> <tipnode> [<filteredhash>]
61 # <tiprev> <tipnode> [<filteredhash>]
62 #
62 #
63 # <tiprev> is an integer revision and <tipnode> is a 40 character hex
63 # <tiprev> is an integer revision and <tipnode> is a 40 character hex
64 # node for that changeset. These redundantly identify the repository
64 # node for that changeset. These redundantly identify the repository
65 # tip from the time the cache was written. In addition, <filteredhash>,
65 # tip from the time the cache was written. In addition, <filteredhash>,
66 # if present, is a 40 character hex hash of the contents of the filtered
66 # if present, is a 40 character hex hash of the contents of the filtered
67 # revisions for this filter. If the set of filtered revs changes, the
67 # revisions for this filter. If the set of filtered revs changes, the
68 # hash will change and invalidate the cache.
68 # hash will change and invalidate the cache.
69 #
69 #
70 # The history part of the tags cache consists of lines of the form:
70 # The history part of the tags cache consists of lines of the form:
71 #
71 #
72 # <node> <tag>
72 # <node> <tag>
73 #
73 #
74 # (This format is identical to that of .hgtags files.)
74 # (This format is identical to that of .hgtags files.)
75 #
75 #
76 # <tag> is the tag name and <node> is the 40 character hex changeset
76 # <tag> is the tag name and <node> is the 40 character hex changeset
77 # the tag is associated with.
77 # the tag is associated with.
78 #
78 #
79 # Tags are written sorted by tag name.
79 # Tags are written sorted by tag name.
80 #
80 #
81 # Tags associated with multiple changesets have an entry for each changeset.
81 # Tags associated with multiple changesets have an entry for each changeset.
82 # The most recent changeset (in terms of revlog ordering for the head
82 # The most recent changeset (in terms of revlog ordering for the head
83 # setting it) for each tag is last.
83 # setting it) for each tag is last.
84
84
def fnoderevs(ui, repo, revs):
    """return the list of '.hgtags' fnodes used in a set revisions

    This is returned as list of unique fnodes. We use a list instead of a set
    because order matters when it comes to tags."""
    # Use the unfiltered repo so rev -> node lookups work even for
    # revisions hidden by the current repoview filter.
    unfi = repo.unfiltered()
    tonode = unfi.changelog.node
    nodes = [tonode(r) for r in revs]
    # _getfnodes returns a {node: fnode} mapping; _filterfnodes then
    # flattens it into a duplicate-free list ordered like 'nodes'.
    fnodes = _getfnodes(ui, repo, nodes)
    fnodes = _filterfnodes(fnodes, nodes)
    return fnodes
96
96
def _nulltonone(value):
    """Map the nullid sentinel to None.

    For tag values, nullid means "deleted"; callers prefer that expressed
    as None, which this small helper takes care of."""
    return None if value == nullid else value
105
105
def difftags(ui, repo, oldfnodes, newfnodes):
    """list differences between tags expressed in two set of file-nodes

    The list contains entries in the form: (tagname, oldvalue, new value).
    None is used to expressed missing value:
        ('foo', None, 'abcd') is a new tag,
        ('bar', 'ef01', None) is a deletion,
        ('baz', 'abcd', 'ef01') is a tag movement.
    """
    # identical fnode lists cannot produce any tag difference
    if oldfnodes == newfnodes:
        return []
    oldtags = _tagsfromfnodes(ui, repo, oldfnodes)
    newtags = _tagsfromfnodes(ui, repo, newfnodes)

    # list of (tag, old, new): None means missing
    entries = []
    for tag, (new, __) in newtags.items():
        new = _nulltonone(new)
        # pop() removes handled tags from 'oldtags', so the loop below
        # only sees tags absent from 'newtags' (i.e. deleted ones)
        old, __ = oldtags.pop(tag, (None, None))
        old = _nulltonone(old)
        if old != new:
            entries.append((tag, old, new))
    # handle deleted tags
    for tag, (old, __) in oldtags.items():
        old = _nulltonone(old)
        if old is not None:
            entries.append((tag, old, None))
    # sort for deterministic output, regardless of dict iteration order
    entries.sort()
    return entries
135
135
def writediff(fp, difflist):
    """Serialize a tags diff (as produced by difftags) to ``fp``.

    Each entry becomes one line (two lines for a move), using a line based
    format:

        <action> <hex-node> <tag-name>\n

    where <action> is one of:
       -R tag is removed,
       +A tag is added,
       -M tag is moved (old value),
       +M tag is moved (new value),

    Example:

         +A 875517b4806a848f942811a315a5bce30804ae85 t5

    See documentation of difftags output for details about the input.
    """
    add = '+A %s %s\n'
    remove = '-R %s %s\n'
    updateold = '-M %s %s\n'
    updatenew = '+M %s %s\n'
    for tag, oldnode, newnode in difflist:
        # translate binary nodes to hex, leaving None untouched
        oldhex = hex(oldnode) if oldnode is not None else None
        newhex = hex(newnode) if newnode is not None else None
        if oldhex is None:
            # no previous value: the tag was added
            fp.write(add % (newhex, tag))
        elif newhex is None:
            # no new value: the tag was removed
            fp.write(remove % (oldhex, tag))
        else:
            # tag moved: record both the old and the new value
            fp.write(updateold % (oldhex, tag))
            fp.write(updatenew % (newhex, tag))
173
173
def findglobaltags(ui, repo):
    '''Find global tags in a repo: return a tagsmap

    tagsmap: tag name to (node, hist) 2-tuples.

    The tags cache is read and updated as a side-effect of calling.
    '''
    (heads, tagfnode, valid, cachetags, shouldwrite) = _readtagcache(ui, repo)
    if cachetags is not None:
        # a fully up-to-date cache was read; nothing to recompute or write
        assert not shouldwrite
        # XXX is this really 100% correct?  are there oddball special
        # cases where a global tag should outrank a local tag but won't,
        # because cachetags does not contain rank info?
        alltags = {}
        _updatetags(cachetags, alltags)
        return alltags

    # sanity-check the heads returned by the cache before doing the
    # (expensive) fnode filtering below
    for head in reversed(heads):  # oldest to newest
        assert head in repo.changelog.nodemap, (
            "tag cache returned bogus head %s" % short(head))
    fnodes = _filterfnodes(tagfnode, reversed(heads))
    alltags = _tagsfromfnodes(ui, repo, fnodes)

    # and update the cache (if necessary)
    if shouldwrite:
        _writetagcache(ui, repo, valid, alltags)
    return alltags
201
201
202 def _filterfnodes(tagfnode, nodes):
202 def _filterfnodes(tagfnode, nodes):
203 """return a list of unique fnodes
203 """return a list of unique fnodes
204
204
205 The order of this list matches the order of "nodes". Preserving this order
205 The order of this list matches the order of "nodes". Preserving this order
206 is important as reading tags in different order provides different
206 is important as reading tags in different order provides different
207 results."""
207 results."""
208 seen = set() # set of fnode
208 seen = set() # set of fnode
209 fnodes = []
209 fnodes = []
210 for no in nodes: # oldest to newest
210 for no in nodes: # oldest to newest
211 fnode = tagfnode.get(no)
211 fnode = tagfnode.get(no)
212 if fnode and fnode not in seen:
212 if fnode and fnode not in seen:
213 seen.add(fnode)
213 seen.add(fnode)
214 fnodes.append(fnode)
214 fnodes.append(fnode)
215 return fnodes
215 return fnodes
216
216
def _tagsfromfnodes(ui, repo, fnodes):
    """return a tagsmap from a list of file-node

    tagsmap: tag name to (node, hist) 2-tuples.

    The order of the list matters."""
    alltags = {}
    fctx = None
    for fnode in fnodes:
        if fctx is None:
            # first iteration: resolve the '.hgtags' filelog once
            fctx = repo.filectx('.hgtags', fileid=fnode)
        else:
            # reuse the previous filectx so the filelog lookup is not
            # repeated for every fnode
            fctx = fctx.filectx(fnode)
        filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
        # later fnodes are merged on top of earlier ones, hence the
        # ordering of 'fnodes' affects the final result
        _updatetags(filetags, alltags)
    return alltags
233
233
def readlocaltags(ui, repo, alltags, tagtypes):
    '''Read local tags in repo. Update alltags and tagtypes.'''
    try:
        data = repo.vfs.read("localtags")
    except IOError as inst:
        # a missing localtags file simply means there are no local tags;
        # any other I/O error is propagated
        if inst.errno != errno.ENOENT:
            raise
        return

    # localtags is in the local encoding; re-encode to UTF-8 on
    # input for consistency with the rest of this module.
    filetags = _readtags(
        ui, repo, data.splitlines(), "localtags",
        recode=encoding.fromlocal)

    # remove tags pointing to invalid nodes
    cl = repo.changelog
    for t in list(filetags):  # copy keys: entries may be deleted below
        try:
            cl.rev(filetags[t][0])
        except (LookupError, ValueError):
            del filetags[t]

    _updatetags(filetags, alltags, 'local', tagtypes)
258
258
def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
    '''Read tag definitions from a file (or any source of lines).

    This function returns two sortdicts with similar information:

    - the first dict, bintaghist, contains the tag information as expected by
      the _readtags function, i.e. a mapping from tag name to (node, hist):
        - node is the node id from the last line read for that name,
        - hist is the list of node ids previously associated with it (in file
          order). All node ids are binary, not hex.

    - the second dict, hextaglines, is a mapping from tag name to a list of
      [hexnode, line number] pairs, ordered from the oldest to the newest node.

    When calcnodelines is False the hextaglines dict is not calculated (an
    empty dict is returned). This is done to improve this function's
    performance in cases where the line numbers are not needed.
    '''

    bintaghist = util.sortdict()
    hextaglines = util.sortdict()
    # 1-based line counter, used only for debug messages below
    count = 0

    def dbg(msg):
        ui.debug("%s, line %d: %s\n" % (fn, count, msg))

    for nline, line in enumerate(lines):
        count += 1
        if not line:
            continue
        # each entry is "<hex-node> <tag name>"; the tag name may itself
        # contain spaces, hence the maxsplit of 1
        try:
            (nodehex, name) = line.split(" ", 1)
        except ValueError:
            # malformed entries are skipped, not fatal
            dbg("cannot parse entry")
            continue
        name = name.strip()
        if recode:
            name = recode(name)
        try:
            nodebin = bin(nodehex)
        except TypeError:
            # not a valid hex node: skip the entry
            dbg("node '%s' is not well formed" % nodehex)
            continue

        # update filetags
        if calcnodelines:
            # map tag name to a list of line numbers
            if name not in hextaglines:
                hextaglines[name] = []
            hextaglines[name].append([nodehex, nline])
            continue
        # map tag name to (node, hist)
        if name not in bintaghist:
            bintaghist[name] = []
        bintaghist[name].append(nodebin)
    return bintaghist, hextaglines
315
315
def _readtags(ui, repo, lines, fn, recode=None, calcnodelines=False):
    '''Read tag definitions from a file (or any source of lines).

    Returns a mapping from tag name to (node, hist).

    "node" is the node id from the last line read for that name. "hist"
    is the list of node ids previously associated with it (in file order).
    All node ids are binary, not hex.
    '''
    taghist, _hexlines = _readtaghist(ui, repo, lines, fn, recode=recode,
                                      calcnodelines=calcnodelines)
    # Replacing entries in a util.sortdict is much slower than inserting
    # new ones, which matters when there are thousands of tags. Build a
    # fresh sortdict instead of updating 'taghist' in place.
    result = util.sortdict()
    for name, history in taghist.items():
        result[name] = (history[-1], history[:-1])
    return result
334
334
335 def _updatetags(filetags, alltags, tagtype=None, tagtypes=None):
335 def _updatetags(filetags, alltags, tagtype=None, tagtypes=None):
336 """Incorporate the tag info read from one file into dictionnaries
336 """Incorporate the tag info read from one file into dictionnaries
337
337
338 The first one, 'alltags', is a "tagmaps" (see 'findglobaltags' for details).
338 The first one, 'alltags', is a "tagmaps" (see 'findglobaltags' for details).
339
339
340 The second one, 'tagtypes', is optional and will be updated to track the
340 The second one, 'tagtypes', is optional and will be updated to track the
341 "tagtype" of entries in the tagmaps. When set, the 'tagtype' argument also
341 "tagtype" of entries in the tagmaps. When set, the 'tagtype' argument also
342 needs to be set."""
342 needs to be set."""
343 if tagtype is None:
343 if tagtype is None:
344 assert tagtypes is None
344 assert tagtypes is None
345
345
346 for name, nodehist in filetags.iteritems():
346 for name, nodehist in filetags.iteritems():
347 if name not in alltags:
347 if name not in alltags:
348 alltags[name] = nodehist
348 alltags[name] = nodehist
349 if tagtype is not None:
349 if tagtype is not None:
350 tagtypes[name] = tagtype
350 tagtypes[name] = tagtype
351 continue
351 continue
352
352
353 # we prefer alltags[name] if:
353 # we prefer alltags[name] if:
354 # it supersedes us OR
354 # it supersedes us OR
355 # mutual supersedes and it has a higher rank
355 # mutual supersedes and it has a higher rank
356 # otherwise we win because we're tip-most
356 # otherwise we win because we're tip-most
357 anode, ahist = nodehist
357 anode, ahist = nodehist
358 bnode, bhist = alltags[name]
358 bnode, bhist = alltags[name]
359 if (bnode != anode and anode in bhist and
359 if (bnode != anode and anode in bhist and
360 (bnode not in ahist or len(bhist) > len(ahist))):
360 (bnode not in ahist or len(bhist) > len(ahist))):
361 anode = bnode
361 anode = bnode
362 elif tagtype is not None:
362 elif tagtype is not None:
363 tagtypes[name] = tagtype
363 tagtypes[name] = tagtype
364 ahist.extend([n for n in bhist if n not in ahist])
364 ahist.extend([n for n in bhist if n not in ahist])
365 alltags[name] = anode, ahist
365 alltags[name] = anode, ahist
366
366
367 def _filename(repo):
367 def _filename(repo):
368 """name of a tagcache file for a given repo or repoview"""
368 """name of a tagcache file for a given repo or repoview"""
369 filename = 'tags2'
369 filename = 'tags2'
370 if repo.filtername:
370 if repo.filtername:
371 filename = '%s-%s' % (filename, repo.filtername)
371 filename = '%s-%s' % (filename, repo.filtername)
372 return filename
372 return filename
373
373
def _readtagcache(ui, repo):
    '''Read the tag cache.

    Returns a tuple (heads, fnodes, validinfo, cachetags, shouldwrite).

    If the cache is completely up-to-date, "cachetags" is a dict of the
    form returned by _readtags() and "heads", "fnodes", and "validinfo" are
    None and "shouldwrite" is False.

    If the cache is not up to date, "cachetags" is None. "heads" is a list
    of all heads currently in the repository, ordered from tip to oldest.
    "validinfo" is a tuple describing cache validation info. This is used
    when writing the tags cache. "fnodes" is a mapping from head to .hgtags
    filenode. "shouldwrite" is True.

    If the cache is not up to date, the caller is responsible for reading tag
    info from each returned head. (See findglobaltags().)
    '''
    try:
        cachefile = repo.cachevfs(_filename(repo), 'r')
        # force reading the file for static-http
        cachelines = iter(cachefile)
    except IOError:
        # no cache file: fall through to the recompute paths below
        cachefile = None

    # parse the cache validation line: "<tiprev> <tipnode> [<filteredhash>]"
    cacherev = None
    cachenode = None
    cachehash = None
    if cachefile:
        try:
            validline = next(cachelines)
            validline = validline.split()
            cacherev = int(validline[0])
            cachenode = bin(validline[1])
            if len(validline) > 2:
                cachehash = bin(validline[2])
        except Exception:
            # corruption of the cache, just recompute it.
            pass

    tipnode = repo.changelog.tip()
    tiprev = len(repo.changelog) - 1

    # Case 1 (common): tip is the same, so nothing has changed.
    # (Unchanged tip trivially means no changesets have been added.
    # But, thanks to localrepository.destroyed(), it also means none
    # have been destroyed by strip or rollback.)
    if (cacherev == tiprev
            and cachenode == tipnode
            and cachehash == scmutil.filteredhash(repo, tiprev)):
        tags = _readtags(ui, repo, cachelines, cachefile.name)
        cachefile.close()
        return (None, None, None, tags, False)
    if cachefile:
        cachefile.close() # ignore rest of file

    # validation info to embed when the cache is (re)written
    valid = (tiprev, tipnode, scmutil.filteredhash(repo, tiprev))

    repoheads = repo.heads()
    # Case 2 (uncommon): empty repo; get out quickly and don't bother
    # writing an empty cache.
    if repoheads == [nullid]:
        return ([], {}, valid, {}, False)

    # Case 3 (uncommon): cache file missing or empty.

    # Case 4 (uncommon): tip rev decreased. This should only happen
    # when we're called from localrepository.destroyed(). Refresh the
    # cache so future invocations will not see disappeared heads in the
    # cache.

    # Case 5 (common): tip has changed, so we've added/replaced heads.

    # As it happens, the code to handle cases 3, 4, 5 is the same.

    # N.B. in case 4 (nodes destroyed), "new head" really means "newly
    # exposed".
    if not len(repo.file('.hgtags')):
        # No tags have ever been committed, so we can avoid a
        # potentially expensive search.
        return ([], {}, valid, None, True)

    # Now we have to lookup the .hgtags filenode for every new head.
    # This is the most expensive part of finding tags, so performance
    # depends primarily on the size of newheads. Worst case: no cache
    # file, so newheads == repoheads.
    # Reversed order helps the cache ('repoheads' is in descending order)
    cachefnode = _getfnodes(ui, repo, reversed(repoheads))

    # Caller has to iterate over all heads, but can use the filenodes in
    # cachefnode to get to each .hgtags revision quickly.
    return (repoheads, cachefnode, valid, None, True)
466
467
def _getfnodes(ui, repo, nodes):
    """return .hgtags fnodes for a list of changeset nodes

    Return value is a {node: fnode} mapping. There will be no entry for nodes
    without a '.hgtags' file.
    """
    # timing instrumentation: logged through ui.log() below
    starttime = util.timer()
    fnodescache = hgtagsfnodescache(repo.unfiltered())
    cachefnode = {}
    for node in nodes:
        fnode = fnodescache.getfnode(node)
        # nullid means the changeset has no '.hgtags' file: omit the entry
        if fnode != nullid:
            cachefnode[node] = fnode

    # persist any fnodes newly computed during the lookups above
    fnodescache.write()

    duration = util.timer() - starttime
    ui.log('tagscache',
           '%d/%d cache hits/lookups in %0.4f seconds\n',
           fnodescache.hitcount, fnodescache.lookupcount, duration)
    return cachefnode
488
489
def _writetagcache(ui, repo, valid, cachetags):
    """Persist the computed global tags to the on-disk tags cache.

    Writing is strictly best effort: any I/O failure while opening or
    closing the cache file is silently ignored.
    """
    filename = _filename(repo)
    try:
        cachefile = repo.cachevfs(filename, 'w', atomictemp=True)
    except (OSError, IOError):
        # the cache is an optimization only; failing to open it is fine
        return

    ui.log('tagscache', 'writing .hg/cache/%s with %d tags\n',
           filename, len(cachetags))

    # Header line: tip rev and node, plus the filtered hash when present.
    if not valid[2]:
        cachefile.write('%d %s\n' % (valid[0], hex(valid[1])))
    else:
        cachefile.write('%d %s %s\n' % (valid[0], hex(valid[1]), hex(valid[2])))

    # Tag names in the cache are in UTF-8 -- which is the whole reason
    # we keep them in UTF-8 throughout this module. If we converted
    # them local encoding on input, we would lose info writing them to
    # the cache.
    for name, (node, hist) in sorted(cachetags.iteritems()):
        for histnode in hist:
            cachefile.write("%s %s\n" % (hex(histnode), name))
        cachefile.write("%s %s\n" % (hex(node), name))

    try:
        cachefile.close()
    except (OSError, IOError):
        pass
517
518
def tag(repo, names, node, message, local, user, date, editor=False):
    '''Tag a revision with one or more symbolic names.

    ``names`` is a list of tag names or, when adding a single tag, a
    plain string.

    When ``local`` is True the tags are stored in a per-repository
    (untracked) file. Otherwise they are stored in the tracked .hgtags
    file and a new changeset committing that change is created.

    Keyword arguments:

    local: whether to store tags in non-version-controlled file
    (default False)

    message: commit message to use if committing

    user: name of user to use if committing

    date: date tuple to use if committing'''

    if not local:
        # Refuse to proceed if .hgtags has uncommitted modifications:
        # the tagging commit would silently pick them up.
        hgtagsmatcher = matchmod.exact(['.hgtags'])
        dirty = any(repo.status(match=hgtagsmatcher, unknown=True,
                                ignored=True))
        if dirty:
            raise error.Abort(_('working copy of .hgtags is changed'),
                              hint=_('please commit .hgtags manually'))

    with repo.wlock():
        repo.tags()  # instantiate the cache
        _tag(repo, names, node, message, local, user, date, editor=editor)
549
550
def _tag(repo, names, node, message, local, user, date, extra=None,
         editor=False):
    """Implementation of tag(): record ``names`` as tags for ``node``.

    ``names`` may be a single byte string or an iterable of byte strings.
    Local tags are appended to the untracked 'localtags' file; global tags
    are appended to the tracked .hgtags file and a changeset is committed.

    Returns the node of the tagging commit for global tags; returns None
    (implicitly) on the local-tag path. Fires the 'pretag' hook (which may
    abort) before writing and the 'tag' hook after.
    """
    if isinstance(names, bytes):
        names = (names,)

    branches = repo.branchmap()
    for name in names:
        # 'pretag' may veto the whole operation (throw=True).
        repo.hook('pretag', throw=True, node=hex(node), tag=name,
                  local=local)
        if name in branches:
            repo.ui.warn(_("warning: tag %s conflicts with existing"
                           " branch name\n") % name)

    def writetags(fp, names, munge, prevtags):
        # Append tag records to fp (already-open tags file). ``munge`` is an
        # optional encoding transform applied to each name; ``prevtags`` is
        # the file's existing content, used only to ensure it ends with \n.
        fp.seek(0, 2)
        if prevtags and not prevtags.endswith('\n'):
            fp.write('\n')
        for name in names:
            if munge:
                m = munge(name)
            else:
                m = name

            if (repo._tagscache.tagtypes and
                name in repo._tagscache.tagtypes):
                # Re-tagging: first record the old node so readers see the
                # tag's history before the new value.
                old = repo.tags().get(name, nullid)
                fp.write('%s %s\n' % (hex(old), m))
            fp.write('%s %s\n' % (hex(node), m))
        fp.close()

    prevtags = ''
    if local:
        try:
            fp = repo.vfs('localtags', 'r+')
        except IOError:
            # file does not exist yet; create it
            fp = repo.vfs('localtags', 'a')
        else:
            prevtags = fp.read()

        # local tags are stored in the current charset
        writetags(fp, names, None, prevtags)
        for name in names:
            repo.hook('tag', node=hex(node), tag=name, local=local)
        return

    try:
        fp = repo.wvfs('.hgtags', 'rb+')
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        fp = repo.wvfs('.hgtags', 'ab')
    else:
        prevtags = fp.read()

    # committed tags are stored in UTF-8
    writetags(fp, names, encoding.fromlocal, prevtags)

    fp.close()

    repo.invalidatecaches()

    if '.hgtags' not in repo.dirstate:
        repo[None].add(['.hgtags'])

    # Commit only the .hgtags change, regardless of other dirty files.
    m = matchmod.exact(['.hgtags'])
    tagnode = repo.commit(message, user, date, extra=extra, match=m,
                          editor=editor)

    for name in names:
        repo.hook('tag', node=hex(node), tag=name, local=local)

    return tagnode
622
623
# Name of the persistent .hgtags filenode cache file (under .hg/cache/).
_fnodescachefile = 'hgtagsfnodes1'
# Each fixed-size record holds the first 4 bytes of the changeset node
# (used as a verifier against strip/rewrite) plus the 20-byte filenode.
_fnodesrecsize = 4 + 20 # changeset fragment + filenode
# Sentinel record value meaning "entry not yet computed".
_fnodesmissingrec = '\xff' * 24
626
627
class hgtagsfnodescache(object):
    """Persistent cache mapping revisions to .hgtags filenodes.

    The cache is an array of records. Each item in the array corresponds to
    a changelog revision. Values in the array contain the first 4 bytes of
    the node hash and the 20 bytes .hgtags filenode for that revision.

    The first 4 bytes are present as a form of verification. Repository
    stripping and rewriting may change the node at a numeric revision in the
    changelog. The changeset fragment serves as a verifier to detect
    rewriting. This logic is shared with the rev branch cache (see
    branchmap.py).

    The instance holds in memory the full cache content but entries are
    only parsed on read.

    Instances behave like lists. ``c[i]`` works where i is a rev or
    changeset node. Missing indexes are populated automatically on access.
    """
    def __init__(self, repo):
        # The cache is indexed by unfiltered revision numbers, so it must
        # be built from an unfiltered repository.
        assert repo.filtername is None

        self._repo = repo

        # Only for reporting purposes.
        self.lookupcount = 0
        self.hitcount = 0

        try:
            data = repo.cachevfs.read(_fnodescachefile)
        except (OSError, IOError):
            data = ""
        self._raw = bytearray(data)

        # The end state of self._raw is an array that is of the exact length
        # required to hold a record for every revision in the repository.
        # We truncate or extend the array as necessary. self._dirtyoffset is
        # defined to be the start offset at which we need to write the output
        # file. This offset is also adjusted when new entries are calculated
        # for array members.
        cllen = len(repo.changelog)
        wantedlen = cllen * _fnodesrecsize
        rawlen = len(self._raw)

        self._dirtyoffset = None

        if rawlen < wantedlen:
            self._dirtyoffset = rawlen
            self._raw.extend('\xff' * (wantedlen - rawlen))
        elif rawlen > wantedlen:
            # There's no easy way to truncate array instances. This seems
            # slightly less evil than copying a potentially large array slice.
            for i in range(rawlen - wantedlen):
                self._raw.pop()
            self._dirtyoffset = len(self._raw)

    def getfnode(self, node, computemissing=True):
        """Obtain the filenode of the .hgtags file at a specified revision.

        If the value is in the cache, the entry will be validated and returned.
        Otherwise, the filenode will be computed and returned unless
        "computemissing" is False, in which case None will be returned without
        any potentially expensive computation being performed.

        If an .hgtags does not exist at the specified revision, nullid is
        returned.
        """
        if node == nullid:
            return nullid

        ctx = self._repo[node]
        rev = ctx.rev()

        self.lookupcount += 1

        offset = rev * _fnodesrecsize
        record = '%s' % self._raw[offset:offset + _fnodesrecsize]
        properprefix = node[0:4]

        # Validate and return existing entry.
        if record != _fnodesmissingrec:
            fileprefix = record[0:4]

            if fileprefix == properprefix:
                self.hitcount += 1
                return record[4:]

            # Fall through.

        # If we get here, the entry is either missing or invalid.

        if not computemissing:
            return None

        fnode = None
        cl = self._repo.changelog
        p1rev, p2rev = cl._uncheckedparentrevs(rev)
        p1node = cl.node(p1rev)
        p1fnode = self.getfnode(p1node, computemissing=False)
        if p2rev != nullrev:
            # There is some no-merge changeset where p1 is null and p2 is set
            # Processing them as merge is just slower, but still gives a good
            # result.
            # BUG FIX: this previously read cl.node(p1rev), so p2fnode was
            # always p1's fnode and the merge comparison below was a no-op.
            p2node = cl.node(p2rev)
            p2fnode = self.getfnode(p2node, computemissing=False)
            if p1fnode != p2fnode:
                # we cannot rely on readfast because we don't know against what
                # parent the readfast delta is computed
                p1fnode = None
        if p1fnode is not None:
            mctx = ctx.manifestctx()
            fnode = mctx.readfast().get('.hgtags')
            if fnode is None:
                fnode = p1fnode
        if fnode is None:
            # Populate missing entry.
            try:
                fnode = ctx.filenode('.hgtags')
            except error.LookupError:
                # No .hgtags file on this revision.
                fnode = nullid

        self._writeentry(offset, properprefix, fnode)
        return fnode

    def setfnode(self, node, fnode):
        """Set the .hgtags filenode for a given changeset."""
        assert len(fnode) == 20
        ctx = self._repo[node]

        # Do a lookup first to avoid writing if nothing has changed.
        if self.getfnode(ctx.node(), computemissing=False) == fnode:
            return

        self._writeentry(ctx.rev() * _fnodesrecsize, node[0:4], fnode)

    def _writeentry(self, offset, prefix, fnode):
        """Store a (prefix, fnode) record at ``offset`` and mark it dirty."""
        # Slices on array instances only accept other array.
        entry = bytearray(prefix + fnode)
        self._raw[offset:offset + _fnodesrecsize] = entry
        # Track the lowest dirty offset. The previous expression
        # ``min(self._dirtyoffset or 0, offset or 0)`` collapsed a None
        # dirty offset to 0, needlessly rewriting the whole file; handle
        # the "nothing dirty yet" case explicitly instead.
        if self._dirtyoffset is None:
            self._dirtyoffset = offset
        else:
            self._dirtyoffset = min(self._dirtyoffset, offset)

    def write(self):
        """Perform all necessary writes to cache file.

        This may no-op if no writes are needed or if a write lock could
        not be obtained.
        """
        if self._dirtyoffset is None:
            return

        data = self._raw[self._dirtyoffset:]
        if not data:
            return

        repo = self._repo

        try:
            # Non-blocking: skip the write rather than stall on the lock.
            lock = repo.wlock(wait=False)
        except error.LockError:
            repo.ui.log('tagscache', 'not writing .hg/cache/%s because '
                        'lock cannot be acquired\n' % (_fnodescachefile))
            return

        try:
            f = repo.cachevfs.open(_fnodescachefile, 'ab')
            try:
                # if the file has been truncated
                actualoffset = f.tell()
                if actualoffset < self._dirtyoffset:
                    self._dirtyoffset = actualoffset
                    data = self._raw[self._dirtyoffset:]
                f.seek(self._dirtyoffset)
                f.truncate()
                repo.ui.log('tagscache',
                            'writing %d bytes to cache/%s\n' % (
                            len(data), _fnodescachefile))
                f.write(data)
                self._dirtyoffset = None
            finally:
                f.close()
        except (IOError, OSError) as inst:
            # Best effort: log and carry on if the cache cannot be written.
            repo.ui.log('tagscache',
                        "couldn't write cache/%s: %s\n" % (
                        _fnodescachefile, stringutil.forcebytestr(inst)))
        finally:
            lock.release()
General Comments 0
You need to be logged in to leave comments. Login now