##// END OF EJS Templates
narrow: use `running_status` in `updateworkingcopy`...
marmoute -
r51030:31be0b46 default
parent child Browse files
Show More
@@ -1,356 +1,362 b''
1 # narrowspec.py - methods for working with a narrow view of a repository
1 # narrowspec.py - methods for working with a narrow view of a repository
2 #
2 #
3 # Copyright 2017 Google, Inc.
3 # Copyright 2017 Google, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 from .i18n import _
9 from .i18n import _
10 from .pycompat import getattr
10 from .pycompat import getattr
11 from . import (
11 from . import (
12 error,
12 error,
13 match as matchmod,
13 match as matchmod,
14 merge,
14 merge,
15 mergestate as mergestatemod,
15 mergestate as mergestatemod,
16 requirements,
16 requirements,
17 scmutil,
17 scmutil,
18 sparse,
18 sparse,
19 util,
19 util,
20 )
20 )
21
21
# The file in .hg/store/ that indicates which paths exist in the store
FILENAME = b'narrowspec'
# The file in .hg/ that indicates which paths exist in the dirstate
DIRSTATE_FILENAME = b'narrowspec.dirstate'

# Pattern prefixes that are allowed in narrow patterns. This list MUST
# only contain patterns that are fast and safe to evaluate. Keep in mind
# that patterns are supplied by clients and executed on remote servers
# as part of wire protocol commands. That means that changes to this
# data structure influence the wire protocol and should not be taken
# lightly - especially removals.
VALID_PREFIXES = (
    b'path:',
    b'rootfilesin:',
)
37
37
38
38
def normalizesplitpattern(kind, pat):
    """Returns the normalized version of a pattern and kind.

    Trailing slashes are stripped from the pattern, which is then
    validated (aborting if it is invalid).

    Returns a tuple with the normalized kind and normalized pattern.
    """
    stripped = pat.rstrip(b'/')
    _validatepattern(stripped)
    return kind, stripped
47
47
48
48
49 def _numlines(s):
49 def _numlines(s):
50 """Returns the number of lines in s, including ending empty lines."""
50 """Returns the number of lines in s, including ending empty lines."""
51 # We use splitlines because it is Unicode-friendly and thus Python 3
51 # We use splitlines because it is Unicode-friendly and thus Python 3
52 # compatible. However, it does not count empty lines at the end, so trick
52 # compatible. However, it does not count empty lines at the end, so trick
53 # it by adding a character at the end.
53 # it by adding a character at the end.
54 return len((s + b'x').splitlines())
54 return len((s + b'x').splitlines())
55
55
56
56
def _validatepattern(pat):
    """Validates the pattern and aborts if it is invalid.

    Patterns are stored in the narrowspec as newline-separated
    POSIX-style bytestring paths. There's no escaping.
    """
    # Newlines are the separator in the narrowspec file, so a pattern
    # containing one would corrupt the file.
    if _numlines(pat) > 1:
        raise error.Abort(_(b'newlines are not allowed in narrowspec paths'))

    # Relative path components could escape or alias the intended paths.
    if {b'.', b'..'} & set(pat.split(b'/')):
        raise error.Abort(
            _(b'"." and ".." are not allowed in narrowspec paths')
        )
74
74
75
75
def normalizepattern(pattern, defaultkind=b'path'):
    """Returns the normalized version of a text-format pattern.

    If the pattern has no kind, the default will be added.
    """
    kind, pat = matchmod._patsplit(pattern, defaultkind)
    normkind, normpat = normalizesplitpattern(kind, pat)
    return b'%s:%s' % (normkind, normpat)
83
83
84
84
def parsepatterns(pats):
    """Parses an iterable of patterns into a typed pattern set.

    Patterns are assumed to be ``path:`` if no prefix is present.
    For safety and performance reasons, only some prefixes are allowed.
    See ``validatepatterns()``.

    This function should be used on patterns that come from the user to
    normalize and validate them to the internal data structure used for
    representing patterns.
    """
    normalized = {normalizepattern(pat) for pat in pats}
    validatepatterns(normalized)
    return normalized
99
99
100
100
def validatepatterns(pats):
    """Validate that patterns are in the expected data structure and format.

    And that is a set of normalized patterns beginning with ``path:`` or
    ``rootfilesin:``.

    This function should be used to validate internal data structures
    and patterns that are loaded from sources that use the internal,
    prefixed pattern representation (but can't necessarily be fully trusted).
    """
    with util.timedcm('narrowspec.validatepatterns(pats size=%d)', len(pats)):
        if not isinstance(pats, set):
            raise error.ProgrammingError(
                b'narrow patterns should be a set; got %r' % pats
            )

        for pat in pats:
            if pat.startswith(VALID_PREFIXES):
                continue
            # Use a Mercurial exception because this can happen due to user
            # bugs (e.g. manually updating spec file).
            raise error.Abort(
                _(b'invalid prefix on narrow pattern: %s') % pat,
                hint=_(
                    b'narrow patterns must begin with one of '
                    b'the following: %s'
                )
                % b', '.join(VALID_PREFIXES),
            )
129
129
130
130
def format(includes, excludes):
    """Serialize include/exclude pattern sets into narrowspec file text.

    Includes that are also excluded are dropped, and both sections are
    emitted in sorted order so the output is deterministic.
    """
    lines = [b'[include]']
    lines.extend(sorted(includes - excludes))
    lines.append(b'[exclude]')
    lines.extend(sorted(excludes))
    return b'\n'.join(lines) + b'\n'
139
139
140
140
def match(root, include=None, exclude=None):
    """Build a matcher for the given narrow include/exclude patterns.

    With no includes, nothing is tracked: passing empty include and
    empty exclude to matchmod.match() would yield a matcher that
    matches everything, so the nevermatcher is returned explicitly.
    """
    if not include:
        return matchmod.never()
    return matchmod.match(
        root, b'', [], include=include or [], exclude=exclude or []
    )
150
150
151
151
def parseconfig(ui, spec):
    """Parse narrowspec text into validated (includes, excludes) sets.

    Aborts if the spec uses '%include' profiles, which narrowspec does
    not support.
    """
    # maybe we should care about the profiles returned too
    includepats, excludepats, profiles = sparse.parseconfig(ui, spec, b'narrow')
    if profiles:
        msg = _(
            b"including other spec files using '%include' is not"
            b" supported in narrowspec"
        )
        raise error.Abort(msg)

    validatepatterns(includepats)
    validatepatterns(excludepats)

    return includepats, excludepats
167
167
168
168
def load(repo):
    """Read and parse the narrowspec from the store.

    Treat "narrowspec does not exist" the same as "narrowspec file
    exists and is empty".
    """
    spec = repo.svfs.tryread(FILENAME)
    return parseconfig(repo.ui, spec)
174
174
175
175
def save(repo, includepats, excludepats):
    """Validate the pattern sets and write them to the store narrowspec."""
    validatepatterns(includepats)
    validatepatterns(excludepats)
    repo.svfs.write(FILENAME, format(includepats, excludepats))
181
181
182
182
def copytoworkingcopy(repo):
    """Copy the store narrowspec into the working-copy (dirstate) copy."""
    repo.vfs.write(DIRSTATE_FILENAME, repo.svfs.read(FILENAME))
186
186
187
187
def savebackup(repo, backupname):
    """Hard-link a backup of the store narrowspec.

    A no-op when the repo does not use the narrow requirement.
    """
    if requirements.NARROW_REQUIREMENT not in repo.requirements:
        return
    svfs = repo.svfs
    # Remove any stale backup before linking a fresh one.
    svfs.tryunlink(backupname)
    util.copyfile(svfs.join(FILENAME), svfs.join(backupname), hardlink=True)
194
194
195
195
def restorebackup(repo, backupname):
    """Restore the store narrowspec from a backup.

    A no-op when the repo does not use the narrow requirement.
    """
    if requirements.NARROW_REQUIREMENT not in repo.requirements:
        return
    svfs = repo.svfs
    util.rename(svfs.join(backupname), svfs.join(FILENAME))
200
200
201
201
def savewcbackup(repo, backupname):
    """Hard-link a backup of the working-copy narrowspec, if present.

    A no-op when the repo does not use the narrow requirement.
    """
    if requirements.NARROW_REQUIREMENT not in repo.requirements:
        return
    vfs = repo.vfs
    vfs.tryunlink(backupname)
    # It may not exist in old repos
    if vfs.exists(DIRSTATE_FILENAME):
        util.copyfile(
            vfs.join(DIRSTATE_FILENAME), vfs.join(backupname), hardlink=True
        )
212
212
213
213
def restorewcbackup(repo, backupname):
    """Restore the working-copy narrowspec from a backup, if one exists.

    A no-op when the repo does not use the narrow requirement.
    """
    if requirements.NARROW_REQUIREMENT not in repo.requirements:
        return
    vfs = repo.vfs
    # It may not exist in old repos
    if vfs.exists(backupname):
        util.rename(vfs.join(backupname), vfs.join(DIRSTATE_FILENAME))
220
220
221
221
def clearwcbackup(repo, backupname):
    """Delete the working-copy narrowspec backup, ignoring a missing file.

    A no-op when the repo does not use the narrow requirement.
    """
    if requirements.NARROW_REQUIREMENT not in repo.requirements:
        return
    repo.vfs.tryunlink(backupname)
226
226
227
227
def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes):
    r"""Restricts the patterns according to repo settings,
    results in a logical AND operation

    :param req_includes: requested includes
    :param req_excludes: requested excludes
    :param repo_includes: repo includes
    :param repo_excludes: repo excludes
    :return: include patterns, exclude patterns, and invalid include patterns.
    """
    res_excludes = set(req_excludes) | set(repo_excludes)
    invalid_includes = []
    if not req_includes:
        # Nothing was requested: keep the repo's own includes.
        res_includes = set(repo_includes)
    elif b'path:.' in repo_includes:
        # The repo already includes everything, so any request is valid.
        res_includes = set(req_includes)
    else:
        res_includes = []
        for req_include in req_includes:
            req_include = util.expandpath(util.normpath(req_include))
            if req_include in repo_includes:
                res_includes.append(req_include)
                continue
            valid = False
            for repo_include in repo_includes:
                # A request nested under a repo include is also valid.
                if req_include.startswith(repo_include + b'/'):
                    valid = True
                    res_includes.append(req_include)
                    break
            if not valid:
                invalid_includes.append(req_include)
        if res_includes:
            res_includes = set(res_includes)
        else:
            # No valid include remains: exclude everything.
            res_excludes = {b'path:.'}
    return res_includes, res_excludes, invalid_includes
265
265
266
266
267 # These two are extracted for extensions (specifically for Google's CitC file
267 # These two are extracted for extensions (specifically for Google's CitC file
268 # system)
268 # system)
def _deletecleanfiles(repo, files):
    """Remove the given (known-clean) files from the working copy."""
    for path in files:
        repo.wvfs.unlinkpath(path)
272
272
273
273
def _writeaddedfiles(repo, pctx, files):
    """Materialize newly-included files in the working copy.

    Files already present on disk are left untouched; the rest are
    fetched from the '.' manifest via the merge machinery.
    """
    mresult = merge.mergeresult()
    mf = repo[b'.'].manifest()
    missing = [f for f in files if not repo.wvfs.exists(f)]
    for f in missing:
        mresult.addfile(
            f,
            mergestatemod.ACTION_GET,
            (mf.flags(f), False),
            b"narrowspec updated",
        )
    merge.applyupdates(
        repo,
        mresult,
        wctx=repo[None],
        mctx=repo[b'.'],
        overwrite=False,
        wantfiledata=False,
    )
293
293
294
294
def checkworkingcopynarrowspec(repo):
    """Raise StateError if the working-copy narrowspec is out of sync
    with the store's narrowspec."""
    # Avoid infinite recursion when updating the working copy
    if getattr(repo, '_updatingnarrowspec', False):
        return
    wcspec = repo.vfs.tryread(DIRSTATE_FILENAME)
    storespec = repo.svfs.tryread(FILENAME)
    if storespec != wcspec:
        raise error.StateError(
            _(b"working copy's narrowspec is stale"),
            hint=_(b"run 'hg tracked --update-working-copy'"),
        )
306
306
307
307
def updateworkingcopy(repo, assumeclean=False):
    """updates the working copy and dirstate from the store narrowspec

    When assumeclean=True, files that are not known to be clean will also
    be deleted. It is then up to the caller to make sure they are clean.
    """
    oldspec = repo.vfs.tryread(DIRSTATE_FILENAME)
    newspec = repo.svfs.tryread(FILENAME)
    # Flag checked by checkworkingcopynarrowspec() to avoid recursing
    # while we are mid-update.
    repo._updatingnarrowspec = True

    oldincludes, oldexcludes = parseconfig(repo.ui, oldspec)
    newincludes, newexcludes = parseconfig(repo.ui, newspec)
    oldmatch = match(repo.root, include=oldincludes, exclude=oldexcludes)
    newmatch = match(repo.root, include=newincludes, exclude=newexcludes)
    # Files newly covered by the spec, and files no longer covered.
    addedmatch = matchmod.differencematcher(newmatch, oldmatch)
    removedmatch = matchmod.differencematcher(oldmatch, newmatch)

    # We mutate the dirstate below, so the caller must hold the wlock.
    assert repo.currentwlock() is not None
    ds = repo.dirstate
    # NOTE(review): status is run inside the dirstate's running_status
    # context manager (this change's purpose) — presumably required by
    # the dirstate API for status bookkeeping; confirm against dirstate docs.
    with ds.running_status(repo):
        lookup, status, _mtime_boundary = ds.status(
            removedmatch,
            subrepos=[],
            ignored=True,
            clean=True,
            unknown=True,
        )
    trackeddirty = status.modified + status.added
    clean = status.clean
    # 'lookup' entries could not be resolved to clean/dirty; treat them
    # per the caller's assumeclean choice.
    if assumeclean:
        clean.extend(lookup)
    else:
        trackeddirty.extend(lookup)
    # Clean files leaving the narrowspec are deleted from disk; dirty,
    # unknown, and ignored ones are kept (with a status message for the
    # tracked-but-dirty ones).
    _deletecleanfiles(repo, clean)
    uipathfn = scmutil.getuipathfn(repo)
    for f in sorted(trackeddirty):
        repo.ui.status(
            _(b'not deleting possibly dirty file %s\n') % uipathfn(f)
        )
    for f in sorted(status.unknown):
        repo.ui.status(_(b'not deleting unknown file %s\n') % uipathfn(f))
    for f in sorted(status.ignored):
        repo.ui.status(_(b'not deleting ignored file %s\n') % uipathfn(f))
    # Drop every file that left the narrowspec from the dirstate.
    for f in clean + trackeddirty:
        ds.update_file(f, p1_tracked=False, wc_tracked=False)

    pctx = repo[b'.']

    # only update added files that are in the sparse checkout
    addedmatch = matchmod.intersectmatchers(addedmatch, sparse.matcher(repo))
    newfiles = [f for f in pctx.manifest().walk(addedmatch) if f not in ds]
    for f in newfiles:
        ds.update_file(f, p1_tracked=True, wc_tracked=True, possibly_dirty=True)
    _writeaddedfiles(repo, pctx, newfiles)
    repo._updatingnarrowspec = False
General Comments 0
You need to be logged in to leave comments. Login now