tags: remove the old non-caching implementation of findglobaltags().

Greg Ward
r11351:1cdc8b5e default
tags.py
@@ -1,341 +1,313 @@
 # tags.py - read tag info from local repository
 #
 # Copyright 2009 Matt Mackall <mpm@selenic.com>
 # Copyright 2009 Greg Ward <greg@gerg.ca>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 # Currently this module only deals with reading and caching tags.
 # Eventually, it could take care of updating (adding/removing/moving)
 # tags too.

 from node import nullid, bin, hex, short
 from i18n import _
 import encoding
 import error

 def _debugalways(ui, *msg):
     ui.write(*msg)

 def _debugconditional(ui, *msg):
     ui.debug(*msg)

 def _debugnever(ui, *msg):
     pass

 _debug = _debugalways
 _debug = _debugnever

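The pair of assignments just above makes _debug a module-level switch: the later assignment wins, so the tag-cache debug messages are silent by default. A minimal sketch of turning them back on, assuming the module is imported from the mercurial package (not part of this change):

    from mercurial import tags as tagsmod
    # route the cache messages through ui.debug(), so they appear with --debug
    tagsmod._debug = tagsmod._debugconditional
    # or print them unconditionally via ui.write():
    # tagsmod._debug = tagsmod._debugalways
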
-def findglobaltags1(ui, repo, alltags, tagtypes):
+def findglobaltags(ui, repo, alltags, tagtypes):
     '''Find global tags in repo by reading .hgtags from every head that
-    has a distinct version of it. Updates the dicts alltags, tagtypes
-    in place: alltags maps tag name to (node, hist) pair (see _readtags()
-    below), and tagtypes maps tag name to tag type ('global' in this
-    case).'''
-
-    seen = set()
-    fctx = None
-    ctxs = []                   # list of filectx
-    for node in repo.heads():
-        try:
-            fnode = repo[node].filenode('.hgtags')
-        except error.LookupError:
-            continue
-        if fnode not in seen:
-            seen.add(fnode)
-            if not fctx:
-                fctx = repo.filectx('.hgtags', fileid=fnode)
-            else:
-                fctx = fctx.filectx(fnode)
-            ctxs.append(fctx)
-
-    # read the tags file from each head, ending with the tip
-    for fctx in reversed(ctxs):
-        filetags = _readtags(
-            ui, repo, fctx.data().splitlines(), fctx)
-        _updatetags(filetags, "global", alltags, tagtypes)
-
-def findglobaltags2(ui, repo, alltags, tagtypes):
-    '''Same as findglobaltags1(), but with caching.'''
+    has a distinct version of it, using a cache to avoid excess work.
+    Updates the dicts alltags, tagtypes in place: alltags maps tag name
+    to (node, hist) pair (see _readtags() below), and tagtypes maps tag
+    name to tag type ("global" in this case).'''
     # This is so we can be lazy and assume alltags contains only global
     # tags when we pass it to _writetagcache().
     assert len(alltags) == len(tagtypes) == 0, \
            "findglobaltags() should be called first"

     (heads, tagfnode, cachetags, shouldwrite) = _readtagcache(ui, repo)
     if cachetags is not None:
         assert not shouldwrite
         # XXX is this really 100% correct? are there oddball special
         # cases where a global tag should outrank a local tag but won't,
         # because cachetags does not contain rank info?
         _updatetags(cachetags, 'global', alltags, tagtypes)
         return

     _debug(ui, "reading tags from %d head(s): %s\n"
            % (len(heads), map(short, reversed(heads))))
     seen = set()                # set of fnode
     fctx = None
     for head in reversed(heads):  # oldest to newest
         assert head in repo.changelog.nodemap, \
                "tag cache returned bogus head %s" % short(head)

         fnode = tagfnode.get(head)
         if fnode and fnode not in seen:
             seen.add(fnode)
             if not fctx:
                 fctx = repo.filectx('.hgtags', fileid=fnode)
             else:
                 fctx = fctx.filectx(fnode)

             filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
             _updatetags(filetags, 'global', alltags, tagtypes)

     # and update the cache (if necessary)
     if shouldwrite:
         _writetagcache(ui, repo, heads, tagfnode, alltags)

-
-# Set this to findglobaltags1 to disable tag caching.
-findglobaltags = findglobaltags2
-
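With findglobaltags1() removed, the caching implementation above is the only findglobaltags(). A rough sketch of how a caller is expected to drive it together with readlocaltags() below (hypothetical driver code; in Mercurial proper the real caller lives in localrepository):

    alltags = {}     # tag name -> (node, hist), names kept in UTF-8
    tagtypes = {}    # tag name -> 'global' or 'local'
    findglobaltags(ui, repo, alltags, tagtypes)   # must run first (see the assert above)
    readlocaltags(ui, repo, alltags, tagtypes)    # local tags are merged in afterwards
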
 def readlocaltags(ui, repo, alltags, tagtypes):
     '''Read local tags in repo. Update alltags and tagtypes.'''
     try:
         # localtags is in the local encoding; re-encode to UTF-8 on
         # input for consistency with the rest of this module.
         data = repo.opener("localtags").read()
         filetags = _readtags(
             ui, repo, data.splitlines(), "localtags",
             recode=encoding.fromlocal)
         _updatetags(filetags, "local", alltags, tagtypes)
     except IOError:
         pass

 def _readtags(ui, repo, lines, fn, recode=None):
     '''Read tag definitions from a file (or any source of lines).
     Return a mapping from tag name to (node, hist): node is the node id
     from the last line read for that name, and hist is the list of node
     ids previously associated with it (in file order). All node ids are
     binary, not hex.'''

     filetags = {}               # map tag name to (node, hist)
     count = 0

     def warn(msg):
         ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

     for line in lines:
         count += 1
         if not line:
             continue
         try:
             (nodehex, name) = line.split(" ", 1)
         except ValueError:
             warn(_("cannot parse entry"))
             continue
         name = name.strip()
         if recode:
             name = recode(name)
         try:
             nodebin = bin(nodehex)
         except TypeError:
             warn(_("node '%s' is not well formed") % nodehex)
             continue
         if nodebin not in repo.changelog.nodemap:
             # silently ignore as pull -r might cause this
             continue

         # update filetags
         hist = []
         if name in filetags:
             n, hist = filetags[name]
             hist.append(n)
         filetags[name] = (nodebin, hist)
     return filetags

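_readtags() expects one entry per line of the form "<hex node id> <tag name>"; a later line for the same name supersedes earlier ones, whose nodes are kept as that tag's history. A sketch of the effect (hypothetical node ids, shortened here; real entries carry full 40-character hashes):

    # input lines, e.g. from .hgtags:
    #   0123abcd... release-1.0
    #   4567ef89... release-1.0
    # result, roughly:
    #   {'release-1.0': (bin('4567ef89...'), [bin('0123abcd...')])}
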
 def _updatetags(filetags, tagtype, alltags, tagtypes):
     '''Incorporate the tag info read from one file into the two
     dictionaries, alltags and tagtypes, that contain all tag
     info (global across all heads plus local).'''

     for name, nodehist in filetags.iteritems():
         if name not in alltags:
             alltags[name] = nodehist
             tagtypes[name] = tagtype
             continue

         # we prefer alltags[name] if:
         #  it supercedes us OR
         #  mutual supercedes and it has a higher rank
         # otherwise we win because we're tip-most
         anode, ahist = nodehist
         bnode, bhist = alltags[name]
         if (bnode != anode and anode in bhist and
             (bnode not in ahist or len(bhist) > len(ahist))):
             anode = bnode
         ahist.extend([n for n in bhist if n not in ahist])
         alltags[name] = anode, ahist
         tagtypes[name] = tagtype


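The precedence rule above keeps the existing entry's node only when it supersedes the incoming one (the incoming node appears in its history) and either is not itself superseded or has the longer history; otherwise the incoming, tip-most entry wins. Either way the two histories are merged. A small worked example with hypothetical nodes A and B:

    # alltags already holds 'v1': (B, [A])   -- B supersedes A
    # a later file offers  'v1': (A, [])     -- A does not supersede B
    # A is in B's history and B is not in A's, so the condition holds:
    # 'v1' stays at B and the merged history is [A]  ->  alltags['v1'] == (B, [A])
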
 # The tag cache only stores info about heads, not the tag contents
 # from each head. I.e. it doesn't try to squeeze out the maximum
 # performance, but is simpler has a better chance of actually
 # working correctly. And this gives the biggest performance win: it
 # avoids looking up .hgtags in the manifest for every head, and it
 # can avoid calling heads() at all if there have been no changes to
 # the repo.

 def _readtagcache(ui, repo):
     '''Read the tag cache and return a tuple (heads, fnodes, cachetags,
     shouldwrite). If the cache is completely up-to-date, cachetags is a
     dict of the form returned by _readtags(); otherwise, it is None and
     heads and fnodes are set. In that case, heads is the list of all
     heads currently in the repository (ordered from tip to oldest) and
     fnodes is a mapping from head to .hgtags filenode. If those two are
     set, caller is responsible for reading tag info from each head.'''

     try:
         cachefile = repo.opener('tags.cache', 'r')
         # force reading the file for static-http
         cachelines = iter(cachefile)
         _debug(ui, 'reading tag cache from %s\n' % cachefile.name)
     except IOError:
         cachefile = None

     # The cache file consists of lines like
     #   <headrev> <headnode> [<tagnode>]
     # where <headrev> and <headnode> redundantly identify a repository
     # head from the time the cache was written, and <tagnode> is the
     # filenode of .hgtags on that head. Heads with no .hgtags file will
     # have no <tagnode>. The cache is ordered from tip to oldest (which
     # is part of why <headrev> is there: a quick visual check is all
     # that's required to ensure correct order).
     #
     # This information is enough to let us avoid the most expensive part
     # of finding global tags, which is looking up <tagnode> in the
     # manifest for each head.
     cacherevs = []              # list of headrev
     cacheheads = []             # list of headnode
     cachefnode = {}             # map headnode to filenode
     if cachefile:
         for line in cachelines:
             if line == "\n":
                 break
             line = line.rstrip().split()
             cacherevs.append(int(line[0]))
             headnode = bin(line[1])
             cacheheads.append(headnode)
             if len(line) == 3:
                 fnode = bin(line[2])
                 cachefnode[headnode] = fnode

     tipnode = repo.changelog.tip()
     tiprev = len(repo.changelog) - 1

     # Case 1 (common): tip is the same, so nothing has changed.
     # (Unchanged tip trivially means no changesets have been added.
     # But, thanks to localrepository.destroyed(), it also means none
     # have been destroyed by strip or rollback.)
     if cacheheads and cacheheads[0] == tipnode and cacherevs[0] == tiprev:
         _debug(ui, "tag cache: tip unchanged\n")
         tags = _readtags(ui, repo, cachelines, cachefile.name)
         cachefile.close()
         return (None, None, tags, False)
     if cachefile:
         cachefile.close()       # ignore rest of file

     repoheads = repo.heads()
     # Case 2 (uncommon): empty repo; get out quickly and don't bother
     # writing an empty cache.
     if repoheads == [nullid]:
         return ([], {}, {}, False)

     # Case 3 (uncommon): cache file missing or empty.
     if not cacheheads:
         _debug(ui, 'tag cache: cache file missing or empty\n')

     # Case 4 (uncommon): tip rev decreased. This should only happen
     # when we're called from localrepository.destroyed(). Refresh the
     # cache so future invocations will not see disappeared heads in the
     # cache.
     elif cacheheads and tiprev < cacherevs[0]:
         _debug(ui,
                'tag cache: tip rev decremented (from %d to %d), '
                'so we must be destroying nodes\n'
                % (cacherevs[0], tiprev))

     # Case 5 (common): tip has changed, so we've added/replaced heads.
     else:
         _debug(ui,
                'tag cache: tip has changed (%d:%s); must find new heads\n'
                % (tiprev, short(tipnode)))

     # Luckily, the code to handle cases 3, 4, 5 is the same. So the
     # above if/elif/else can disappear once we're confident this thing
     # actually works and we don't need the debug output.

     # N.B. in case 4 (nodes destroyed), "new head" really means "newly
     # exposed".
     newheads = [head
                 for head in repoheads
                 if head not in set(cacheheads)]
     _debug(ui, 'tag cache: found %d head(s) not in cache: %s\n'
            % (len(newheads), map(short, newheads)))

     # Now we have to lookup the .hgtags filenode for every new head.
     # This is the most expensive part of finding tags, so performance
     # depends primarily on the size of newheads. Worst case: no cache
     # file, so newheads == repoheads.
     for head in newheads:
         cctx = repo[head]
         try:
             fnode = cctx.filenode('.hgtags')
             cachefnode[head] = fnode
         except error.LookupError:
             # no .hgtags file on this head
             pass

     # Caller has to iterate over all heads, but can use the filenodes in
     # cachefnode to get to each .hgtags revision quickly.
     return (repoheads, cachefnode, None, True)

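Putting the format comments above together, an up-to-date .hg/tags.cache for a small repository might look roughly like this (hypothetical revision numbers, node ids shortened; real lines carry full 40-character hex node ids):

    42 1f0e2a8d... 9fc3b5da...      <- head rev, head node, .hgtags filenode
    37 cd19ab34...                  <- a head with no .hgtags file
                                    <- blank separator line
    7c6e4f2b... release-1.0         <- cached tag entries: <hex tag node> <name>, in UTF-8
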
 def _writetagcache(ui, repo, heads, tagfnode, cachetags):

     try:
         cachefile = repo.opener('tags.cache', 'w', atomictemp=True)
     except (OSError, IOError):
         return
     _debug(ui, 'writing cache file %s\n' % cachefile.name)

     realheads = repo.heads()    # for sanity checks below
     for head in heads:
         # temporary sanity checks; these can probably be removed
         # once this code has been in crew for a few weeks
         assert head in repo.changelog.nodemap, \
                'trying to write non-existent node %s to tag cache' % short(head)
         assert head in realheads, \
                'trying to write non-head %s to tag cache' % short(head)
         assert head != nullid, \
                'trying to write nullid to tag cache'

         # This can't fail because of the first assert above. When/if we
         # remove that assert, we might want to catch LookupError here
         # and downgrade it to a warning.
         rev = repo.changelog.rev(head)

         fnode = tagfnode.get(head)
         if fnode:
             cachefile.write('%d %s %s\n' % (rev, hex(head), hex(fnode)))
         else:
             cachefile.write('%d %s\n' % (rev, hex(head)))

     # Tag names in the cache are in UTF-8 -- which is the whole reason
     # we keep them in UTF-8 throughout this module. If we converted
     # them local encoding on input, we would lose info writing them to
     # the cache.
     cachefile.write('\n')
     for (name, (node, hist)) in cachetags.iteritems():
         cachefile.write("%s %s\n" % (hex(node), name))

     cachefile.rename()
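
_writetagcache() deliberately fails soft: if the cache cannot be opened for writing it simply returns, since the cache is an optimization rather than authoritative data. Writing through an atomictemp handle means readers never see a half-written file; the final rename() is what publishes the new contents. A minimal sketch of the same pattern with a hypothetical file name:

    f = repo.opener('somecache', 'w', atomictemp=True)  # data goes to a temp file under .hg
    f.write('42 ...\n')
    f.rename()    # atomically replace .hg/somecache with the temp file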