tags: explicitly grab list of dict keys...
Augie Fackler
r35846:553a98a4 default
@@ -1,788 +1,788 @@
1 # tags.py - read tag info from local repository
1 # tags.py - read tag info from local repository
2 #
2 #
3 # Copyright 2009 Matt Mackall <mpm@selenic.com>
3 # Copyright 2009 Matt Mackall <mpm@selenic.com>
4 # Copyright 2009 Greg Ward <greg@gerg.ca>
4 # Copyright 2009 Greg Ward <greg@gerg.ca>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 # Currently this module only deals with reading and caching tags.
9 # Currently this module only deals with reading and caching tags.
10 # Eventually, it could take care of updating (adding/removing/moving)
10 # Eventually, it could take care of updating (adding/removing/moving)
11 # tags too.
11 # tags too.
12
12
13 from __future__ import absolute_import
13 from __future__ import absolute_import
14
14
15 import errno
15 import errno
16
16
17 from .node import (
17 from .node import (
18 bin,
18 bin,
19 hex,
19 hex,
20 nullid,
20 nullid,
21 short,
21 short,
22 )
22 )
23 from .i18n import _
23 from .i18n import _
24 from . import (
24 from . import (
25 encoding,
25 encoding,
26 error,
26 error,
27 match as matchmod,
27 match as matchmod,
28 scmutil,
28 scmutil,
29 util,
29 util,
30 )
30 )
31
31
32 # Tags computation can be expensive and caches exist to make it fast in
32 # Tags computation can be expensive and caches exist to make it fast in
33 # the common case.
33 # the common case.
34 #
34 #
35 # The "hgtagsfnodes1" cache file caches the .hgtags filenode values for
35 # The "hgtagsfnodes1" cache file caches the .hgtags filenode values for
36 # each revision in the repository. The file is effectively an array of
36 # each revision in the repository. The file is effectively an array of
37 # fixed length records. Read the docs for "hgtagsfnodescache" for technical
37 # fixed length records. Read the docs for "hgtagsfnodescache" for technical
38 # details.
38 # details.
39 #
39 #
40 # The .hgtags filenode cache grows in proportion to the length of the
40 # The .hgtags filenode cache grows in proportion to the length of the
41 # changelog. The file is truncated when the changelog is stripped.
41 # changelog. The file is truncated when the changelog is stripped.
42 #
42 #
43 # The purpose of the filenode cache is to avoid the most expensive part
43 # The purpose of the filenode cache is to avoid the most expensive part
44 # of finding global tags, which is looking up the .hgtags filenode in the
44 # of finding global tags, which is looking up the .hgtags filenode in the
45 # manifest for each head. This can take dozens of milliseconds or over
45 # manifest for each head. This can take dozens of milliseconds or over
46 # 100ms for repositories with very large manifests. Multiplied by dozens
46 # 100ms for repositories with very large manifests. Multiplied by dozens
47 # or even hundreds of heads, this becomes a significant performance concern.
47 # or even hundreds of heads, this becomes a significant performance concern.
48 #
48 #
49 # There also exists a separate cache file for each repository filter.
49 # There also exists a separate cache file for each repository filter.
50 # These "tags-*" files store information about the history of tags.
50 # These "tags-*" files store information about the history of tags.
51 #
51 #
52 # Each tags cache file consists of a cache validation line followed by
52 # Each tags cache file consists of a cache validation line followed by
53 # a history of tags.
53 # a history of tags.
54 #
54 #
55 # The cache validation line has the format:
55 # The cache validation line has the format:
56 #
56 #
57 # <tiprev> <tipnode> [<filteredhash>]
57 # <tiprev> <tipnode> [<filteredhash>]
58 #
58 #
59 # <tiprev> is an integer revision and <tipnode> is a 40 character hex
59 # <tiprev> is an integer revision and <tipnode> is a 40 character hex
60 # node for that changeset. These redundantly identify the repository
60 # node for that changeset. These redundantly identify the repository
61 # tip from the time the cache was written. In addition, <filteredhash>,
61 # tip from the time the cache was written. In addition, <filteredhash>,
62 # if present, is a 40 character hex hash of the contents of the filtered
62 # if present, is a 40 character hex hash of the contents of the filtered
63 # revisions for this filter. If the set of filtered revs changes, the
63 # revisions for this filter. If the set of filtered revs changes, the
64 # hash will change and invalidate the cache.
64 # hash will change and invalidate the cache.
65 #
65 #
66 # The history part of the tags cache consists of lines of the form:
66 # The history part of the tags cache consists of lines of the form:
67 #
67 #
68 # <node> <tag>
68 # <node> <tag>
69 #
69 #
70 # (This format is identical to that of .hgtags files.)
70 # (This format is identical to that of .hgtags files.)
71 #
71 #
72 # <tag> is the tag name and <node> is the 40 character hex changeset
72 # <tag> is the tag name and <node> is the 40 character hex changeset
73 # the tag is associated with.
73 # the tag is associated with.
74 #
74 #
75 # Tags are written sorted by tag name.
75 # Tags are written sorted by tag name.
76 #
76 #
77 # Tags associated with multiple changesets have an entry for each changeset.
77 # Tags associated with multiple changesets have an entry for each changeset.
78 # The most recent changeset (in terms of revlog ordering for the head
78 # The most recent changeset (in terms of revlog ordering for the head
79 # setting it) for each tag is last.
79 # setting it) for each tag is last.
80
80
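For illustration, a minimal sketch of parsing the validation line described above; the helper name is hypothetical and it relies on the bin() import at the top of this module:

    def parsevalidline(line):
        # "<tiprev> <tipnode> [<filteredhash>]", space-separated fields
        fields = line.split()
        tiprev = int(fields[0])
        tipnode = bin(fields[1])  # 40-char hex -> 20-byte binary node
        filteredhash = bin(fields[2]) if len(fields) > 2 else None
        return tiprev, tipnode, filteredhash

This mirrors what _readtagcache() below does with the first line of the cache file.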
81 def fnoderevs(ui, repo, revs):
81 def fnoderevs(ui, repo, revs):
82 """return the list of '.hgtags' fnodes used in a set of revisions
82 """return the list of '.hgtags' fnodes used in a set of revisions
83
83
84 This is returned as list of unique fnodes. We use a list instead of a set
84 This is returned as list of unique fnodes. We use a list instead of a set
85 because order matters when it comes to tags."""
85 because order matters when it comes to tags."""
86 unfi = repo.unfiltered()
86 unfi = repo.unfiltered()
87 tonode = unfi.changelog.node
87 tonode = unfi.changelog.node
88 nodes = [tonode(r) for r in revs]
88 nodes = [tonode(r) for r in revs]
89 fnodes = _getfnodes(ui, repo, nodes[::-1]) # reversed helps the cache
89 fnodes = _getfnodes(ui, repo, nodes[::-1]) # reversed helps the cache
90 fnodes = _filterfnodes(fnodes, nodes)
90 fnodes = _filterfnodes(fnodes, nodes)
91 return fnodes
91 return fnodes
92
92
93 def _nulltonone(value):
93 def _nulltonone(value):
94 """convert nullid to None
94 """convert nullid to None
95
95
96 For a tag value, nullid means "deleted". This small utility function helps
96 For a tag value, nullid means "deleted". This small utility function helps
97 translate that to None."""
97 translate that to None."""
98 if value == nullid:
98 if value == nullid:
99 return None
99 return None
100 return value
100 return value
101
101
102 def difftags(ui, repo, oldfnodes, newfnodes):
102 def difftags(ui, repo, oldfnodes, newfnodes):
103 """list differences between tags expressed in two sets of file-nodes
103 """list differences between tags expressed in two sets of file-nodes
104
104
105 The list contains entries in the form: (tagname, oldvalue, newvalue).
105 The list contains entries in the form: (tagname, oldvalue, newvalue).
106 None is used to express a missing value:
106 None is used to express a missing value:
107 ('foo', None, 'abcd') is a new tag,
107 ('foo', None, 'abcd') is a new tag,
108 ('bar', 'ef01', None) is a deletion,
108 ('bar', 'ef01', None) is a deletion,
109 ('baz', 'abcd', 'ef01') is a tag movement.
109 ('baz', 'abcd', 'ef01') is a tag movement.
110 """
110 """
111 if oldfnodes == newfnodes:
111 if oldfnodes == newfnodes:
112 return []
112 return []
113 oldtags = _tagsfromfnodes(ui, repo, oldfnodes)
113 oldtags = _tagsfromfnodes(ui, repo, oldfnodes)
114 newtags = _tagsfromfnodes(ui, repo, newfnodes)
114 newtags = _tagsfromfnodes(ui, repo, newfnodes)
115
115
116 # list of (tag, old, new): None means missing
116 # list of (tag, old, new): None means missing
117 entries = []
117 entries = []
118 for tag, (new, __) in newtags.items():
118 for tag, (new, __) in newtags.items():
119 new = _nulltonone(new)
119 new = _nulltonone(new)
120 old, __ = oldtags.pop(tag, (None, None))
120 old, __ = oldtags.pop(tag, (None, None))
121 old = _nulltonone(old)
121 old = _nulltonone(old)
122 if old != new:
122 if old != new:
123 entries.append((tag, old, new))
123 entries.append((tag, old, new))
124 # handle deleted tags
124 # handle deleted tags
125 for tag, (old, __) in oldtags.items():
125 for tag, (old, __) in oldtags.items():
126 old = _nulltonone(old)
126 old = _nulltonone(old)
127 if old is not None:
127 if old is not None:
128 entries.append((tag, old, None))
128 entries.append((tag, old, None))
129 entries.sort()
129 entries.sort()
130 return entries
130 return entries
131
131
132 def writediff(fp, difflist):
132 def writediff(fp, difflist):
133 """write tags diff information to a file.
133 """write tags diff information to a file.
134
134
135 Data is stored in a line-based format:
135 Data is stored in a line-based format:
136
136
137 <action> <hex-node> <tag-name>\n
137 <action> <hex-node> <tag-name>\n
138
138
139 Actions are defined as follows:
139 Actions are defined as follows:
140 -R tag is removed,
140 -R tag is removed,
141 +A tag is added,
141 +A tag is added,
142 -M tag is moved (old value),
142 -M tag is moved (old value),
143 +M tag is moved (new value),
143 +M tag is moved (new value),
144
144
145 Example:
145 Example:
146
146
147 +A 875517b4806a848f942811a315a5bce30804ae85 t5
147 +A 875517b4806a848f942811a315a5bce30804ae85 t5
148
148
149 See documentation of difftags output for details about the input.
149 See documentation of difftags output for details about the input.
150 """
150 """
151 add = '+A %s %s\n'
151 add = '+A %s %s\n'
152 remove = '-R %s %s\n'
152 remove = '-R %s %s\n'
153 updateold = '-M %s %s\n'
153 updateold = '-M %s %s\n'
154 updatenew = '+M %s %s\n'
154 updatenew = '+M %s %s\n'
155 for tag, old, new in difflist:
155 for tag, old, new in difflist:
156 # translate to hex
156 # translate to hex
157 if old is not None:
157 if old is not None:
158 old = hex(old)
158 old = hex(old)
159 if new is not None:
159 if new is not None:
160 new = hex(new)
160 new = hex(new)
161 # write to file
161 # write to file
162 if old is None:
162 if old is None:
163 fp.write(add % (new, tag))
163 fp.write(add % (new, tag))
164 elif new is None:
164 elif new is None:
165 fp.write(remove % (old, tag))
165 fp.write(remove % (old, tag))
166 else:
166 else:
167 fp.write(updateold % (old, tag))
167 fp.write(updateold % (old, tag))
168 fp.write(updatenew % (new, tag))
168 fp.write(updatenew % (new, tag))
169
169
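A hedged usage sketch tying difftags() and writediff() together; the output file name and the origin of the fnode lists are assumptions, not part of this module:

    # oldfnodes/newfnodes would typically come from fnoderevs(ui, repo, revs)
    difflist = difftags(ui, repo, oldfnodes, newfnodes)
    with open('tags-diff.txt', 'w') as fp:
        writediff(fp, difflist)
    # the file now contains lines such as "+A <hex-node> <tag-name>"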
170 def findglobaltags(ui, repo):
170 def findglobaltags(ui, repo):
171 '''Find global tags in a repo: return a tagsmap
171 '''Find global tags in a repo: return a tagsmap
172
172
173 tagsmap: tag name to (node, hist) 2-tuples.
173 tagsmap: tag name to (node, hist) 2-tuples.
174
174
175 The tags cache is read and updated as a side-effect of calling.
175 The tags cache is read and updated as a side-effect of calling.
176 '''
176 '''
177 (heads, tagfnode, valid, cachetags, shouldwrite) = _readtagcache(ui, repo)
177 (heads, tagfnode, valid, cachetags, shouldwrite) = _readtagcache(ui, repo)
178 if cachetags is not None:
178 if cachetags is not None:
179 assert not shouldwrite
179 assert not shouldwrite
180 # XXX is this really 100% correct? are there oddball special
180 # XXX is this really 100% correct? are there oddball special
181 # cases where a global tag should outrank a local tag but won't,
181 # cases where a global tag should outrank a local tag but won't,
182 # because cachetags does not contain rank info?
182 # because cachetags does not contain rank info?
183 alltags = {}
183 alltags = {}
184 _updatetags(cachetags, alltags)
184 _updatetags(cachetags, alltags)
185 return alltags
185 return alltags
186
186
187 for head in reversed(heads): # oldest to newest
187 for head in reversed(heads): # oldest to newest
188 assert head in repo.changelog.nodemap, \
188 assert head in repo.changelog.nodemap, \
189 "tag cache returned bogus head %s" % short(head)
189 "tag cache returned bogus head %s" % short(head)
190 fnodes = _filterfnodes(tagfnode, reversed(heads))
190 fnodes = _filterfnodes(tagfnode, reversed(heads))
191 alltags = _tagsfromfnodes(ui, repo, fnodes)
191 alltags = _tagsfromfnodes(ui, repo, fnodes)
192
192
193 # and update the cache (if necessary)
193 # and update the cache (if necessary)
194 if shouldwrite:
194 if shouldwrite:
195 _writetagcache(ui, repo, valid, alltags)
195 _writetagcache(ui, repo, valid, alltags)
196 return alltags
196 return alltags
197
197
198 def _filterfnodes(tagfnode, nodes):
198 def _filterfnodes(tagfnode, nodes):
199 """return a list of unique fnodes
199 """return a list of unique fnodes
200
200
201 The order of this list matches the order of "nodes". Preserving this order
201 The order of this list matches the order of "nodes". Preserving this order
202 is important as reading tags in a different order provides different
202 is important as reading tags in a different order provides different
203 results."""
203 results."""
204 seen = set() # set of fnode
204 seen = set() # set of fnode
205 fnodes = []
205 fnodes = []
206 for no in nodes: # oldest to newest
206 for no in nodes: # oldest to newest
207 fnode = tagfnode.get(no)
207 fnode = tagfnode.get(no)
208 if fnode and fnode not in seen:
208 if fnode and fnode not in seen:
209 seen.add(fnode)
209 seen.add(fnode)
210 fnodes.append(fnode)
210 fnodes.append(fnode)
211 return fnodes
211 return fnodes
212
212
213 def _tagsfromfnodes(ui, repo, fnodes):
213 def _tagsfromfnodes(ui, repo, fnodes):
214 """return a tagsmap from a list of file-nodes
214 """return a tagsmap from a list of file-nodes
215
215
216 tagsmap: tag name to (node, hist) 2-tuples.
216 tagsmap: tag name to (node, hist) 2-tuples.
217
217
218 The order of the list matters."""
218 The order of the list matters."""
219 alltags = {}
219 alltags = {}
220 fctx = None
220 fctx = None
221 for fnode in fnodes:
221 for fnode in fnodes:
222 if fctx is None:
222 if fctx is None:
223 fctx = repo.filectx('.hgtags', fileid=fnode)
223 fctx = repo.filectx('.hgtags', fileid=fnode)
224 else:
224 else:
225 fctx = fctx.filectx(fnode)
225 fctx = fctx.filectx(fnode)
226 filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
226 filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
227 _updatetags(filetags, alltags)
227 _updatetags(filetags, alltags)
228 return alltags
228 return alltags
229
229
230 def readlocaltags(ui, repo, alltags, tagtypes):
230 def readlocaltags(ui, repo, alltags, tagtypes):
231 '''Read local tags in repo. Update alltags and tagtypes.'''
231 '''Read local tags in repo. Update alltags and tagtypes.'''
232 try:
232 try:
233 data = repo.vfs.read("localtags")
233 data = repo.vfs.read("localtags")
234 except IOError as inst:
234 except IOError as inst:
235 if inst.errno != errno.ENOENT:
235 if inst.errno != errno.ENOENT:
236 raise
236 raise
237 return
237 return
238
238
239 # localtags is in the local encoding; re-encode to UTF-8 on
239 # localtags is in the local encoding; re-encode to UTF-8 on
240 # input for consistency with the rest of this module.
240 # input for consistency with the rest of this module.
241 filetags = _readtags(
241 filetags = _readtags(
242 ui, repo, data.splitlines(), "localtags",
242 ui, repo, data.splitlines(), "localtags",
243 recode=encoding.fromlocal)
243 recode=encoding.fromlocal)
244
244
245 # remove tags pointing to invalid nodes
245 # remove tags pointing to invalid nodes
246 cl = repo.changelog
246 cl = repo.changelog
247 for t in filetags.keys():
247 for t in list(filetags):
248 try:
248 try:
249 cl.rev(filetags[t][0])
249 cl.rev(filetags[t][0])
250 except (LookupError, ValueError):
250 except (LookupError, ValueError):
251 del filetags[t]
251 del filetags[t]
252
252
253 _updatetags(filetags, alltags, 'local', tagtypes)
253 _updatetags(filetags, alltags, 'local', tagtypes)
254
254
255 def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
255 def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
256 '''Read tag definitions from a file (or any source of lines).
256 '''Read tag definitions from a file (or any source of lines).
257
257
258 This function returns two sortdicts with similar information:
258 This function returns two sortdicts with similar information:
259
259
260 - the first dict, bintaghist, contains the tag information as expected by
260 - the first dict, bintaghist, contains the tag information as expected by
261 the _readtags function, i.e. a mapping from tag name to (node, hist):
261 the _readtags function, i.e. a mapping from tag name to (node, hist):
262 - node is the node id from the last line read for that name,
262 - node is the node id from the last line read for that name,
263 - hist is the list of node ids previously associated with it (in file
263 - hist is the list of node ids previously associated with it (in file
264 order). All node ids are binary, not hex.
264 order). All node ids are binary, not hex.
265
265
266 - the second dict, hextaglines, is a mapping from tag name to a list of
266 - the second dict, hextaglines, is a mapping from tag name to a list of
267 [hexnode, line number] pairs, ordered from the oldest to the newest node.
267 [hexnode, line number] pairs, ordered from the oldest to the newest node.
268
268
269 When calcnodelines is False the hextaglines dict is not calculated (an
269 When calcnodelines is False the hextaglines dict is not calculated (an
270 empty dict is returned). This is done to improve this function's
270 empty dict is returned). This is done to improve this function's
271 performance in cases where the line numbers are not needed.
271 performance in cases where the line numbers are not needed.
272 '''
272 '''
273
273
274 bintaghist = util.sortdict()
274 bintaghist = util.sortdict()
275 hextaglines = util.sortdict()
275 hextaglines = util.sortdict()
276 count = 0
276 count = 0
277
277
278 def dbg(msg):
278 def dbg(msg):
279 ui.debug("%s, line %s: %s\n" % (fn, count, msg))
279 ui.debug("%s, line %s: %s\n" % (fn, count, msg))
280
280
281 for nline, line in enumerate(lines):
281 for nline, line in enumerate(lines):
282 count += 1
282 count += 1
283 if not line:
283 if not line:
284 continue
284 continue
285 try:
285 try:
286 (nodehex, name) = line.split(" ", 1)
286 (nodehex, name) = line.split(" ", 1)
287 except ValueError:
287 except ValueError:
288 dbg("cannot parse entry")
288 dbg("cannot parse entry")
289 continue
289 continue
290 name = name.strip()
290 name = name.strip()
291 if recode:
291 if recode:
292 name = recode(name)
292 name = recode(name)
293 try:
293 try:
294 nodebin = bin(nodehex)
294 nodebin = bin(nodehex)
295 except TypeError:
295 except TypeError:
296 dbg("node '%s' is not well formed" % nodehex)
296 dbg("node '%s' is not well formed" % nodehex)
297 continue
297 continue
298
298
299 # update filetags
299 # update filetags
300 if calcnodelines:
300 if calcnodelines:
301 # map tag name to a list of line numbers
301 # map tag name to a list of line numbers
302 if name not in hextaglines:
302 if name not in hextaglines:
303 hextaglines[name] = []
303 hextaglines[name] = []
304 hextaglines[name].append([nodehex, nline])
304 hextaglines[name].append([nodehex, nline])
305 continue
305 continue
306 # map tag name to (node, hist)
306 # map tag name to (node, hist)
307 if name not in bintaghist:
307 if name not in bintaghist:
308 bintaghist[name] = []
308 bintaghist[name] = []
309 bintaghist[name].append(nodebin)
309 bintaghist[name].append(nodebin)
310 return bintaghist, hextaglines
310 return bintaghist, hextaglines
311
311
312 def _readtags(ui, repo, lines, fn, recode=None, calcnodelines=False):
312 def _readtags(ui, repo, lines, fn, recode=None, calcnodelines=False):
313 '''Read tag definitions from a file (or any source of lines).
313 '''Read tag definitions from a file (or any source of lines).
314
314
315 Returns a mapping from tag name to (node, hist).
315 Returns a mapping from tag name to (node, hist).
316
316
317 "node" is the node id from the last line read for that name. "hist"
317 "node" is the node id from the last line read for that name. "hist"
318 is the list of node ids previously associated with it (in file order).
318 is the list of node ids previously associated with it (in file order).
319 All node ids are binary, not hex.
319 All node ids are binary, not hex.
320 '''
320 '''
321 filetags, nodelines = _readtaghist(ui, repo, lines, fn, recode=recode,
321 filetags, nodelines = _readtaghist(ui, repo, lines, fn, recode=recode,
322 calcnodelines=calcnodelines)
322 calcnodelines=calcnodelines)
323 # util.sortdict().__setitem__ is much slower at replacing than inserting
323 # util.sortdict().__setitem__ is much slower at replacing than inserting
324 # new entries. The difference can matter if there are thousands of tags.
324 # new entries. The difference can matter if there are thousands of tags.
325 # Create a new sortdict to avoid the performance penalty.
325 # Create a new sortdict to avoid the performance penalty.
326 newtags = util.sortdict()
326 newtags = util.sortdict()
327 for tag, taghist in filetags.items():
327 for tag, taghist in filetags.items():
328 newtags[tag] = (taghist[-1], taghist[:-1])
328 newtags[tag] = (taghist[-1], taghist[:-1])
329 return newtags
329 return newtags
330
330
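An illustrative note on the collapse performed above, using hypothetical nodes n1, n2, n3:

    # For one tag whose .hgtags history was read as [n1, n2, n3] (file order),
    # the resulting entry is:
    #     newtags[tag] == (n3, [n1, n2])
    # i.e. the newest node becomes the tag value and the older nodes its history.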
331 def _updatetags(filetags, alltags, tagtype=None, tagtypes=None):
331 def _updatetags(filetags, alltags, tagtype=None, tagtypes=None):
332 """Incorporate the tag info read from one file into dictionaries
332 """Incorporate the tag info read from one file into dictionaries
333
333
334 The first one, 'alltags', is a tagsmap (see 'findglobaltags' for details).
334 The first one, 'alltags', is a tagsmap (see 'findglobaltags' for details).
335
335
336 The second one, 'tagtypes', is optional and will be updated to track the
336 The second one, 'tagtypes', is optional and will be updated to track the
337 "tagtype" of entries in the tagsmap. When set, the 'tagtype' argument also
337 "tagtype" of entries in the tagsmap. When set, the 'tagtype' argument also
338 needs to be set."""
338 needs to be set."""
339 if tagtype is None:
339 if tagtype is None:
340 assert tagtypes is None
340 assert tagtypes is None
341
341
342 for name, nodehist in filetags.iteritems():
342 for name, nodehist in filetags.iteritems():
343 if name not in alltags:
343 if name not in alltags:
344 alltags[name] = nodehist
344 alltags[name] = nodehist
345 if tagtype is not None:
345 if tagtype is not None:
346 tagtypes[name] = tagtype
346 tagtypes[name] = tagtype
347 continue
347 continue
348
348
349 # we prefer alltags[name] if:
349 # we prefer alltags[name] if:
350 # it supersedes us OR
350 # it supersedes us OR
351 # mutual supersedes and it has a higher rank
351 # mutual supersedes and it has a higher rank
352 # otherwise we win because we're tip-most
352 # otherwise we win because we're tip-most
353 anode, ahist = nodehist
353 anode, ahist = nodehist
354 bnode, bhist = alltags[name]
354 bnode, bhist = alltags[name]
355 if (bnode != anode and anode in bhist and
355 if (bnode != anode and anode in bhist and
356 (bnode not in ahist or len(bhist) > len(ahist))):
356 (bnode not in ahist or len(bhist) > len(ahist))):
357 anode = bnode
357 anode = bnode
358 elif tagtype is not None:
358 elif tagtype is not None:
359 tagtypes[name] = tagtype
359 tagtypes[name] = tagtype
360 ahist.extend([n for n in bhist if n not in ahist])
360 ahist.extend([n for n in bhist if n not in ahist])
361 alltags[name] = anode, ahist
361 alltags[name] = anode, ahist
362
362
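A hypothetical merge example for the preference rule above, with nodes a and b:

    # existing entry:  alltags['t'] == (b, [a])   # b supersedes a
    # incoming entry:  filetags['t'] == (a, [])
    # result:          alltags['t'] == (b, [a])
    # b wins because the incoming node a appears in b's history; the two
    # histories are merged either way.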
363 def _filename(repo):
363 def _filename(repo):
364 """name of a tagcache file for a given repo or repoview"""
364 """name of a tagcache file for a given repo or repoview"""
365 filename = 'tags2'
365 filename = 'tags2'
366 if repo.filtername:
366 if repo.filtername:
367 filename = '%s-%s' % (filename, repo.filtername)
367 filename = '%s-%s' % (filename, repo.filtername)
368 return filename
368 return filename
369
369
370 def _readtagcache(ui, repo):
370 def _readtagcache(ui, repo):
371 '''Read the tag cache.
371 '''Read the tag cache.
372
372
373 Returns a tuple (heads, fnodes, validinfo, cachetags, shouldwrite).
373 Returns a tuple (heads, fnodes, validinfo, cachetags, shouldwrite).
374
374
375 If the cache is completely up-to-date, "cachetags" is a dict of the
375 If the cache is completely up-to-date, "cachetags" is a dict of the
376 form returned by _readtags() and "heads", "fnodes", and "validinfo" are
376 form returned by _readtags() and "heads", "fnodes", and "validinfo" are
377 None and "shouldwrite" is False.
377 None and "shouldwrite" is False.
378
378
379 If the cache is not up to date, "cachetags" is None. "heads" is a list
379 If the cache is not up to date, "cachetags" is None. "heads" is a list
380 of all heads currently in the repository, ordered from tip to oldest.
380 of all heads currently in the repository, ordered from tip to oldest.
381 "validinfo" is a tuple describing cache validation info. This is used
381 "validinfo" is a tuple describing cache validation info. This is used
382 when writing the tags cache. "fnodes" is a mapping from head to .hgtags
382 when writing the tags cache. "fnodes" is a mapping from head to .hgtags
383 filenode. "shouldwrite" is True.
383 filenode. "shouldwrite" is True.
384
384
385 If the cache is not up to date, the caller is responsible for reading tag
385 If the cache is not up to date, the caller is responsible for reading tag
386 info from each returned head. (See findglobaltags().)
386 info from each returned head. (See findglobaltags().)
387 '''
387 '''
388 try:
388 try:
389 cachefile = repo.cachevfs(_filename(repo), 'r')
389 cachefile = repo.cachevfs(_filename(repo), 'r')
390 # force reading the file for static-http
390 # force reading the file for static-http
391 cachelines = iter(cachefile)
391 cachelines = iter(cachefile)
392 except IOError:
392 except IOError:
393 cachefile = None
393 cachefile = None
394
394
395 cacherev = None
395 cacherev = None
396 cachenode = None
396 cachenode = None
397 cachehash = None
397 cachehash = None
398 if cachefile:
398 if cachefile:
399 try:
399 try:
400 validline = next(cachelines)
400 validline = next(cachelines)
401 validline = validline.split()
401 validline = validline.split()
402 cacherev = int(validline[0])
402 cacherev = int(validline[0])
403 cachenode = bin(validline[1])
403 cachenode = bin(validline[1])
404 if len(validline) > 2:
404 if len(validline) > 2:
405 cachehash = bin(validline[2])
405 cachehash = bin(validline[2])
406 except Exception:
406 except Exception:
407 # corruption of the cache, just recompute it.
407 # corruption of the cache, just recompute it.
408 pass
408 pass
409
409
410 tipnode = repo.changelog.tip()
410 tipnode = repo.changelog.tip()
411 tiprev = len(repo.changelog) - 1
411 tiprev = len(repo.changelog) - 1
412
412
413 # Case 1 (common): tip is the same, so nothing has changed.
413 # Case 1 (common): tip is the same, so nothing has changed.
414 # (Unchanged tip trivially means no changesets have been added.
414 # (Unchanged tip trivially means no changesets have been added.
415 # But, thanks to localrepository.destroyed(), it also means none
415 # But, thanks to localrepository.destroyed(), it also means none
416 # have been destroyed by strip or rollback.)
416 # have been destroyed by strip or rollback.)
417 if (cacherev == tiprev
417 if (cacherev == tiprev
418 and cachenode == tipnode
418 and cachenode == tipnode
419 and cachehash == scmutil.filteredhash(repo, tiprev)):
419 and cachehash == scmutil.filteredhash(repo, tiprev)):
420 tags = _readtags(ui, repo, cachelines, cachefile.name)
420 tags = _readtags(ui, repo, cachelines, cachefile.name)
421 cachefile.close()
421 cachefile.close()
422 return (None, None, None, tags, False)
422 return (None, None, None, tags, False)
423 if cachefile:
423 if cachefile:
424 cachefile.close() # ignore rest of file
424 cachefile.close() # ignore rest of file
425
425
426 valid = (tiprev, tipnode, scmutil.filteredhash(repo, tiprev))
426 valid = (tiprev, tipnode, scmutil.filteredhash(repo, tiprev))
427
427
428 repoheads = repo.heads()
428 repoheads = repo.heads()
429 # Case 2 (uncommon): empty repo; get out quickly and don't bother
429 # Case 2 (uncommon): empty repo; get out quickly and don't bother
430 # writing an empty cache.
430 # writing an empty cache.
431 if repoheads == [nullid]:
431 if repoheads == [nullid]:
432 return ([], {}, valid, {}, False)
432 return ([], {}, valid, {}, False)
433
433
434 # Case 3 (uncommon): cache file missing or empty.
434 # Case 3 (uncommon): cache file missing or empty.
435
435
436 # Case 4 (uncommon): tip rev decreased. This should only happen
436 # Case 4 (uncommon): tip rev decreased. This should only happen
437 # when we're called from localrepository.destroyed(). Refresh the
437 # when we're called from localrepository.destroyed(). Refresh the
438 # cache so future invocations will not see disappeared heads in the
438 # cache so future invocations will not see disappeared heads in the
439 # cache.
439 # cache.
440
440
441 # Case 5 (common): tip has changed, so we've added/replaced heads.
441 # Case 5 (common): tip has changed, so we've added/replaced heads.
442
442
443 # As it happens, the code to handle cases 3, 4, 5 is the same.
443 # As it happens, the code to handle cases 3, 4, 5 is the same.
444
444
445 # N.B. in case 4 (nodes destroyed), "new head" really means "newly
445 # N.B. in case 4 (nodes destroyed), "new head" really means "newly
446 # exposed".
446 # exposed".
447 if not len(repo.file('.hgtags')):
447 if not len(repo.file('.hgtags')):
448 # No tags have ever been committed, so we can avoid a
448 # No tags have ever been committed, so we can avoid a
449 # potentially expensive search.
449 # potentially expensive search.
450 return ([], {}, valid, None, True)
450 return ([], {}, valid, None, True)
451
451
452
452
453 # Now we have to lookup the .hgtags filenode for every new head.
453 # Now we have to lookup the .hgtags filenode for every new head.
454 # This is the most expensive part of finding tags, so performance
454 # This is the most expensive part of finding tags, so performance
455 # depends primarily on the size of newheads. Worst case: no cache
455 # depends primarily on the size of newheads. Worst case: no cache
456 # file, so newheads == repoheads.
456 # file, so newheads == repoheads.
457 cachefnode = _getfnodes(ui, repo, repoheads)
457 cachefnode = _getfnodes(ui, repo, repoheads)
458
458
459 # Caller has to iterate over all heads, but can use the filenodes in
459 # Caller has to iterate over all heads, but can use the filenodes in
460 # cachefnode to get to each .hgtags revision quickly.
460 # cachefnode to get to each .hgtags revision quickly.
461 return (repoheads, cachefnode, valid, None, True)
461 return (repoheads, cachefnode, valid, None, True)
462
462
463 def _getfnodes(ui, repo, nodes):
463 def _getfnodes(ui, repo, nodes):
464 """return .hgtags fnodes for a list of changeset nodes
464 """return .hgtags fnodes for a list of changeset nodes
465
465
466 Return value is a {node: fnode} mapping. There will be no entry for nodes
466 Return value is a {node: fnode} mapping. There will be no entry for nodes
467 without a '.hgtags' file.
467 without a '.hgtags' file.
468 """
468 """
469 starttime = util.timer()
469 starttime = util.timer()
470 fnodescache = hgtagsfnodescache(repo.unfiltered())
470 fnodescache = hgtagsfnodescache(repo.unfiltered())
471 cachefnode = {}
471 cachefnode = {}
472 for node in reversed(nodes):
472 for node in reversed(nodes):
473 fnode = fnodescache.getfnode(node)
473 fnode = fnodescache.getfnode(node)
474 if fnode != nullid:
474 if fnode != nullid:
475 cachefnode[node] = fnode
475 cachefnode[node] = fnode
476
476
477 fnodescache.write()
477 fnodescache.write()
478
478
479 duration = util.timer() - starttime
479 duration = util.timer() - starttime
480 ui.log('tagscache',
480 ui.log('tagscache',
481 '%d/%d cache hits/lookups in %0.4f '
481 '%d/%d cache hits/lookups in %0.4f '
482 'seconds\n',
482 'seconds\n',
483 fnodescache.hitcount, fnodescache.lookupcount, duration)
483 fnodescache.hitcount, fnodescache.lookupcount, duration)
484 return cachefnode
484 return cachefnode
485
485
486 def _writetagcache(ui, repo, valid, cachetags):
486 def _writetagcache(ui, repo, valid, cachetags):
487 filename = _filename(repo)
487 filename = _filename(repo)
488 try:
488 try:
489 cachefile = repo.cachevfs(filename, 'w', atomictemp=True)
489 cachefile = repo.cachevfs(filename, 'w', atomictemp=True)
490 except (OSError, IOError):
490 except (OSError, IOError):
491 return
491 return
492
492
493 ui.log('tagscache', 'writing .hg/cache/%s with %d tags\n',
493 ui.log('tagscache', 'writing .hg/cache/%s with %d tags\n',
494 filename, len(cachetags))
494 filename, len(cachetags))
495
495
496 if valid[2]:
496 if valid[2]:
497 cachefile.write('%d %s %s\n' % (valid[0], hex(valid[1]), hex(valid[2])))
497 cachefile.write('%d %s %s\n' % (valid[0], hex(valid[1]), hex(valid[2])))
498 else:
498 else:
499 cachefile.write('%d %s\n' % (valid[0], hex(valid[1])))
499 cachefile.write('%d %s\n' % (valid[0], hex(valid[1])))
500
500
501 # Tag names in the cache are in UTF-8 -- which is the whole reason
501 # Tag names in the cache are in UTF-8 -- which is the whole reason
502 # we keep them in UTF-8 throughout this module. If we converted
502 # we keep them in UTF-8 throughout this module. If we converted
503 # them to the local encoding on input, we would lose info writing them to
503 # them to the local encoding on input, we would lose info writing them to
504 # the cache.
504 # the cache.
505 for (name, (node, hist)) in sorted(cachetags.iteritems()):
505 for (name, (node, hist)) in sorted(cachetags.iteritems()):
506 for n in hist:
506 for n in hist:
507 cachefile.write("%s %s\n" % (hex(n), name))
507 cachefile.write("%s %s\n" % (hex(n), name))
508 cachefile.write("%s %s\n" % (hex(node), name))
508 cachefile.write("%s %s\n" % (hex(node), name))
509
509
510 try:
510 try:
511 cachefile.close()
511 cachefile.close()
512 except (OSError, IOError):
512 except (OSError, IOError):
513 pass
513 pass
514
514
515 def tag(repo, names, node, message, local, user, date, editor=False):
515 def tag(repo, names, node, message, local, user, date, editor=False):
516 '''tag a revision with one or more symbolic names.
516 '''tag a revision with one or more symbolic names.
517
517
518 names is a list of strings or, when adding a single tag, names may be a
518 names is a list of strings or, when adding a single tag, names may be a
519 string.
519 string.
520
520
521 if local is True, the tags are stored in a per-repository file.
521 if local is True, the tags are stored in a per-repository file.
522 otherwise, they are stored in the .hgtags file, and a new
522 otherwise, they are stored in the .hgtags file, and a new
523 changeset is committed with the change.
523 changeset is committed with the change.
524
524
525 keyword arguments:
525 keyword arguments:
526
526
527 local: whether to store tags in non-version-controlled file
527 local: whether to store tags in non-version-controlled file
528 (default False)
528 (default False)
529
529
530 message: commit message to use if committing
530 message: commit message to use if committing
531
531
532 user: name of user to use if committing
532 user: name of user to use if committing
533
533
534 date: date tuple to use if committing'''
534 date: date tuple to use if committing'''
535
535
536 if not local:
536 if not local:
537 m = matchmod.exact(repo.root, '', ['.hgtags'])
537 m = matchmod.exact(repo.root, '', ['.hgtags'])
538 if any(repo.status(match=m, unknown=True, ignored=True)):
538 if any(repo.status(match=m, unknown=True, ignored=True)):
539 raise error.Abort(_('working copy of .hgtags is changed'),
539 raise error.Abort(_('working copy of .hgtags is changed'),
540 hint=_('please commit .hgtags manually'))
540 hint=_('please commit .hgtags manually'))
541
541
542 with repo.wlock():
542 with repo.wlock():
543 repo.tags() # instantiate the cache
543 repo.tags() # instantiate the cache
544 _tag(repo, names, node, message, local, user, date,
544 _tag(repo, names, node, message, local, user, date,
545 editor=editor)
545 editor=editor)
546
546
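A hedged usage sketch of the public tag() entry point; the repository object, tag name and message are hypothetical:

    from mercurial import tags
    node = repo['.'].node()  # tag the working directory parent
    tags.tag(repo, 'v1.0', node, 'Added tag v1.0', local=False,
             user=None, date=None)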
547 def _tag(repo, names, node, message, local, user, date, extra=None,
547 def _tag(repo, names, node, message, local, user, date, extra=None,
548 editor=False):
548 editor=False):
549 if isinstance(names, str):
549 if isinstance(names, str):
550 names = (names,)
550 names = (names,)
551
551
552 branches = repo.branchmap()
552 branches = repo.branchmap()
553 for name in names:
553 for name in names:
554 repo.hook('pretag', throw=True, node=hex(node), tag=name,
554 repo.hook('pretag', throw=True, node=hex(node), tag=name,
555 local=local)
555 local=local)
556 if name in branches:
556 if name in branches:
557 repo.ui.warn(_("warning: tag %s conflicts with existing"
557 repo.ui.warn(_("warning: tag %s conflicts with existing"
558 " branch name\n") % name)
558 " branch name\n") % name)
559
559
560 def writetags(fp, names, munge, prevtags):
560 def writetags(fp, names, munge, prevtags):
561 fp.seek(0, 2)
561 fp.seek(0, 2)
562 if prevtags and prevtags[-1] != '\n':
562 if prevtags and prevtags[-1] != '\n':
563 fp.write('\n')
563 fp.write('\n')
564 for name in names:
564 for name in names:
565 if munge:
565 if munge:
566 m = munge(name)
566 m = munge(name)
567 else:
567 else:
568 m = name
568 m = name
569
569
570 if (repo._tagscache.tagtypes and
570 if (repo._tagscache.tagtypes and
571 name in repo._tagscache.tagtypes):
571 name in repo._tagscache.tagtypes):
572 old = repo.tags().get(name, nullid)
572 old = repo.tags().get(name, nullid)
573 fp.write('%s %s\n' % (hex(old), m))
573 fp.write('%s %s\n' % (hex(old), m))
574 fp.write('%s %s\n' % (hex(node), m))
574 fp.write('%s %s\n' % (hex(node), m))
575 fp.close()
575 fp.close()
576
576
577 prevtags = ''
577 prevtags = ''
578 if local:
578 if local:
579 try:
579 try:
580 fp = repo.vfs('localtags', 'r+')
580 fp = repo.vfs('localtags', 'r+')
581 except IOError:
581 except IOError:
582 fp = repo.vfs('localtags', 'a')
582 fp = repo.vfs('localtags', 'a')
583 else:
583 else:
584 prevtags = fp.read()
584 prevtags = fp.read()
585
585
586 # local tags are stored in the current charset
586 # local tags are stored in the current charset
587 writetags(fp, names, None, prevtags)
587 writetags(fp, names, None, prevtags)
588 for name in names:
588 for name in names:
589 repo.hook('tag', node=hex(node), tag=name, local=local)
589 repo.hook('tag', node=hex(node), tag=name, local=local)
590 return
590 return
591
591
592 try:
592 try:
593 fp = repo.wvfs('.hgtags', 'rb+')
593 fp = repo.wvfs('.hgtags', 'rb+')
594 except IOError as e:
594 except IOError as e:
595 if e.errno != errno.ENOENT:
595 if e.errno != errno.ENOENT:
596 raise
596 raise
597 fp = repo.wvfs('.hgtags', 'ab')
597 fp = repo.wvfs('.hgtags', 'ab')
598 else:
598 else:
599 prevtags = fp.read()
599 prevtags = fp.read()
600
600
601 # committed tags are stored in UTF-8
601 # committed tags are stored in UTF-8
602 writetags(fp, names, encoding.fromlocal, prevtags)
602 writetags(fp, names, encoding.fromlocal, prevtags)
603
603
604 fp.close()
604 fp.close()
605
605
606 repo.invalidatecaches()
606 repo.invalidatecaches()
607
607
608 if '.hgtags' not in repo.dirstate:
608 if '.hgtags' not in repo.dirstate:
609 repo[None].add(['.hgtags'])
609 repo[None].add(['.hgtags'])
610
610
611 m = matchmod.exact(repo.root, '', ['.hgtags'])
611 m = matchmod.exact(repo.root, '', ['.hgtags'])
612 tagnode = repo.commit(message, user, date, extra=extra, match=m,
612 tagnode = repo.commit(message, user, date, extra=extra, match=m,
613 editor=editor)
613 editor=editor)
614
614
615 for name in names:
615 for name in names:
616 repo.hook('tag', node=hex(node), tag=name, local=local)
616 repo.hook('tag', node=hex(node), tag=name, local=local)
617
617
618 return tagnode
618 return tagnode
619
619
620 _fnodescachefile = 'hgtagsfnodes1'
620 _fnodescachefile = 'hgtagsfnodes1'
621 _fnodesrecsize = 4 + 20 # changeset fragment + filenode
621 _fnodesrecsize = 4 + 20 # changeset fragment + filenode
622 _fnodesmissingrec = '\xff' * 24
622 _fnodesmissingrec = '\xff' * 24
623
623
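An illustrative sketch of the fixed-size record layout used by the cache class below; node and fnode stand for 20-byte binary hashes:

    # one record per changelog revision:
    #     record = node[0:4] + fnode     # 4-byte changeset prefix + 20-byte filenode
    #     offset = rev * _fnodesrecsize  # byte position of that revision's record
    # a record of 24 '\xff' bytes (_fnodesmissingrec) marks a missing entry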
624 class hgtagsfnodescache(object):
624 class hgtagsfnodescache(object):
625 """Persistent cache mapping revisions to .hgtags filenodes.
625 """Persistent cache mapping revisions to .hgtags filenodes.
626
626
627 The cache is an array of records. Each item in the array corresponds to
627 The cache is an array of records. Each item in the array corresponds to
628 a changelog revision. Values in the array contain the first 4 bytes of
628 a changelog revision. Values in the array contain the first 4 bytes of
629 the node hash and the 20-byte .hgtags filenode for that revision.
629 the node hash and the 20-byte .hgtags filenode for that revision.
630
630
631 The first 4 bytes are present as a form of verification. Repository
631 The first 4 bytes are present as a form of verification. Repository
632 stripping and rewriting may change the node at a numeric revision in the
632 stripping and rewriting may change the node at a numeric revision in the
633 changelog. The changeset fragment serves as a verifier to detect
633 changelog. The changeset fragment serves as a verifier to detect
634 rewriting. This logic is shared with the rev branch cache (see
634 rewriting. This logic is shared with the rev branch cache (see
635 branchmap.py).
635 branchmap.py).
636
636
637 The instance holds in memory the full cache content but entries are
637 The instance holds in memory the full cache content but entries are
638 only parsed on read.
638 only parsed on read.
639
639
640 Instances behave like lists. ``c[i]`` works where i is a rev or
640 Instances behave like lists. ``c[i]`` works where i is a rev or
641 changeset node. Missing indexes are populated automatically on access.
641 changeset node. Missing indexes are populated automatically on access.
642 """
642 """
643 def __init__(self, repo):
643 def __init__(self, repo):
644 assert repo.filtername is None
644 assert repo.filtername is None
645
645
646 self._repo = repo
646 self._repo = repo
647
647
648 # Only for reporting purposes.
648 # Only for reporting purposes.
649 self.lookupcount = 0
649 self.lookupcount = 0
650 self.hitcount = 0
650 self.hitcount = 0
651
651
652
652
653 try:
653 try:
654 data = repo.cachevfs.read(_fnodescachefile)
654 data = repo.cachevfs.read(_fnodescachefile)
655 except (OSError, IOError):
655 except (OSError, IOError):
656 data = ""
656 data = ""
657 self._raw = bytearray(data)
657 self._raw = bytearray(data)
658
658
659 # The end state of self._raw is an array that is of the exact length
659 # The end state of self._raw is an array that is of the exact length
660 # required to hold a record for every revision in the repository.
660 # required to hold a record for every revision in the repository.
661 # We truncate or extend the array as necessary. self._dirtyoffset is
661 # We truncate or extend the array as necessary. self._dirtyoffset is
662 # defined to be the start offset at which we need to write the output
662 # defined to be the start offset at which we need to write the output
663 # file. This offset is also adjusted when new entries are calculated
663 # file. This offset is also adjusted when new entries are calculated
664 # for array members.
664 # for array members.
665 cllen = len(repo.changelog)
665 cllen = len(repo.changelog)
666 wantedlen = cllen * _fnodesrecsize
666 wantedlen = cllen * _fnodesrecsize
667 rawlen = len(self._raw)
667 rawlen = len(self._raw)
668
668
669 self._dirtyoffset = None
669 self._dirtyoffset = None
670
670
671 if rawlen < wantedlen:
671 if rawlen < wantedlen:
672 self._dirtyoffset = rawlen
672 self._dirtyoffset = rawlen
673 self._raw.extend('\xff' * (wantedlen - rawlen))
673 self._raw.extend('\xff' * (wantedlen - rawlen))
674 elif rawlen > wantedlen:
674 elif rawlen > wantedlen:
675 # There's no easy way to truncate array instances. This seems
675 # There's no easy way to truncate array instances. This seems
676 # slightly less evil than copying a potentially large array slice.
676 # slightly less evil than copying a potentially large array slice.
677 for i in range(rawlen - wantedlen):
677 for i in range(rawlen - wantedlen):
678 self._raw.pop()
678 self._raw.pop()
679 self._dirtyoffset = len(self._raw)
679 self._dirtyoffset = len(self._raw)
680
680
681 def getfnode(self, node, computemissing=True):
681 def getfnode(self, node, computemissing=True):
682 """Obtain the filenode of the .hgtags file at a specified revision.
682 """Obtain the filenode of the .hgtags file at a specified revision.
683
683
684 If the value is in the cache, the entry will be validated and returned.
684 If the value is in the cache, the entry will be validated and returned.
685 Otherwise, the filenode will be computed and returned unless
685 Otherwise, the filenode will be computed and returned unless
686 "computemissing" is False, in which case None will be returned without
686 "computemissing" is False, in which case None will be returned without
687 any potentially expensive computation being performed.
687 any potentially expensive computation being performed.
688
688
689 If a .hgtags file does not exist at the specified revision, nullid is
689 If a .hgtags file does not exist at the specified revision, nullid is
690 returned.
690 returned.
691 """
691 """
692 ctx = self._repo[node]
692 ctx = self._repo[node]
693 rev = ctx.rev()
693 rev = ctx.rev()
694
694
695 self.lookupcount += 1
695 self.lookupcount += 1
696
696
697 offset = rev * _fnodesrecsize
697 offset = rev * _fnodesrecsize
698 record = '%s' % self._raw[offset:offset + _fnodesrecsize]
698 record = '%s' % self._raw[offset:offset + _fnodesrecsize]
699 properprefix = node[0:4]
699 properprefix = node[0:4]
700
700
701 # Validate and return existing entry.
701 # Validate and return existing entry.
702 if record != _fnodesmissingrec:
702 if record != _fnodesmissingrec:
703 fileprefix = record[0:4]
703 fileprefix = record[0:4]
704
704
705 if fileprefix == properprefix:
705 if fileprefix == properprefix:
706 self.hitcount += 1
706 self.hitcount += 1
707 return record[4:]
707 return record[4:]
708
708
709 # Fall through.
709 # Fall through.
710
710
711 # If we get here, the entry is either missing or invalid.
711 # If we get here, the entry is either missing or invalid.
712
712
713 if not computemissing:
713 if not computemissing:
714 return None
714 return None
715
715
716 # Populate missing entry.
716 # Populate missing entry.
717 try:
717 try:
718 fnode = ctx.filenode('.hgtags')
718 fnode = ctx.filenode('.hgtags')
719 except error.LookupError:
719 except error.LookupError:
720 # No .hgtags file on this revision.
720 # No .hgtags file on this revision.
721 fnode = nullid
721 fnode = nullid
722
722
723 self._writeentry(offset, properprefix, fnode)
723 self._writeentry(offset, properprefix, fnode)
724 return fnode
724 return fnode
725
725
726 def setfnode(self, node, fnode):
726 def setfnode(self, node, fnode):
727 """Set the .hgtags filenode for a given changeset."""
727 """Set the .hgtags filenode for a given changeset."""
728 assert len(fnode) == 20
728 assert len(fnode) == 20
729 ctx = self._repo[node]
729 ctx = self._repo[node]
730
730
731 # Do a lookup first to avoid writing if nothing has changed.
731 # Do a lookup first to avoid writing if nothing has changed.
732 if self.getfnode(ctx.node(), computemissing=False) == fnode:
732 if self.getfnode(ctx.node(), computemissing=False) == fnode:
733 return
733 return
734
734
735 self._writeentry(ctx.rev() * _fnodesrecsize, node[0:4], fnode)
735 self._writeentry(ctx.rev() * _fnodesrecsize, node[0:4], fnode)
736
736
737 def _writeentry(self, offset, prefix, fnode):
737 def _writeentry(self, offset, prefix, fnode):
738 # Slices on array instances only accept other arrays.
738 # Slices on array instances only accept other arrays.
739 entry = bytearray(prefix + fnode)
739 entry = bytearray(prefix + fnode)
740 self._raw[offset:offset + _fnodesrecsize] = entry
740 self._raw[offset:offset + _fnodesrecsize] = entry
741 # self._dirtyoffset could be None.
741 # self._dirtyoffset could be None.
742 self._dirtyoffset = min(self._dirtyoffset, offset) or 0
742 self._dirtyoffset = min(self._dirtyoffset, offset) or 0
743
743
744 def write(self):
744 def write(self):
745 """Perform all necessary writes to cache file.
745 """Perform all necessary writes to cache file.
746
746
747 This may no-op if no writes are needed or if a write lock could
747 This may no-op if no writes are needed or if a write lock could
748 not be obtained.
748 not be obtained.
749 """
749 """
750 if self._dirtyoffset is None:
750 if self._dirtyoffset is None:
751 return
751 return
752
752
753 data = self._raw[self._dirtyoffset:]
753 data = self._raw[self._dirtyoffset:]
754 if not data:
754 if not data:
755 return
755 return
756
756
757 repo = self._repo
757 repo = self._repo
758
758
759 try:
759 try:
760 lock = repo.wlock(wait=False)
760 lock = repo.wlock(wait=False)
761 except error.LockError:
761 except error.LockError:
762 repo.ui.log('tagscache', 'not writing .hg/cache/%s because '
762 repo.ui.log('tagscache', 'not writing .hg/cache/%s because '
763 'lock cannot be acquired\n' % (_fnodescachefile))
763 'lock cannot be acquired\n' % (_fnodescachefile))
764 return
764 return
765
765
766 try:
766 try:
767 f = repo.cachevfs.open(_fnodescachefile, 'ab')
767 f = repo.cachevfs.open(_fnodescachefile, 'ab')
768 try:
768 try:
769 # if the file has been truncated
769 # if the file has been truncated
770 actualoffset = f.tell()
770 actualoffset = f.tell()
771 if actualoffset < self._dirtyoffset:
771 if actualoffset < self._dirtyoffset:
772 self._dirtyoffset = actualoffset
772 self._dirtyoffset = actualoffset
773 data = self._raw[self._dirtyoffset:]
773 data = self._raw[self._dirtyoffset:]
774 f.seek(self._dirtyoffset)
774 f.seek(self._dirtyoffset)
775 f.truncate()
775 f.truncate()
776 repo.ui.log('tagscache',
776 repo.ui.log('tagscache',
777 'writing %d bytes to cache/%s\n' % (
777 'writing %d bytes to cache/%s\n' % (
778 len(data), _fnodescachefile))
778 len(data), _fnodescachefile))
779 f.write(data)
779 f.write(data)
780 self._dirtyoffset = None
780 self._dirtyoffset = None
781 finally:
781 finally:
782 f.close()
782 f.close()
783 except (IOError, OSError) as inst:
783 except (IOError, OSError) as inst:
784 repo.ui.log('tagscache',
784 repo.ui.log('tagscache',
785 "couldn't write cache/%s: %s\n" % (
785 "couldn't write cache/%s: %s\n" % (
786 _fnodescachefile, inst))
786 _fnodescachefile, inst))
787 finally:
787 finally:
788 lock.release()
788 lock.release()