dirstate: mark {begin,end}parentchange as deprecated (API)
Augie Fackler
r32352:b2de7fce default
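This changeset only adds the deprecation warnings; callers are expected to move from the explicit begin/end pair to the parentchange context manager that already appears in the diff below. A minimal migration sketch (repo and newnode are illustrative names, not part of this change):

    # old style, now emitting a deprecation warning via ui.deprecwarn
    repo.dirstate.beginparentchange()
    repo.dirstate.setparents(newnode)
    repo.dirstate.endparentchange()

    # preferred style: parentchange() increments _parentwriters around the
    # block; if the body raises, the counter stays elevated so the incoherent
    # dirstate is not written out when the wlock is released
    with repo.dirstate.parentchange():
        repo.dirstate.setparents(newnode)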
@@ -1,1305 +1,1309 @@
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 match as matchmod,
22 22 parsers,
23 23 pathutil,
24 24 pycompat,
25 25 scmutil,
26 26 txnutil,
27 27 util,
28 28 )
29 29
30 30 propertycache = util.propertycache
31 31 filecache = scmutil.filecache
32 32 _rangemask = 0x7fffffff
33 33
34 34 dirstatetuple = parsers.dirstatetuple
35 35
36 36 class repocache(filecache):
37 37 """filecache for files in .hg/"""
38 38 def join(self, obj, fname):
39 39 return obj._opener.join(fname)
40 40
41 41 class rootcache(filecache):
42 42 """filecache for files in the repository root"""
43 43 def join(self, obj, fname):
44 44 return obj._join(fname)
45 45
46 46 def _getfsnow(vfs):
47 47 '''Get "now" timestamp on filesystem'''
48 48 tmpfd, tmpname = vfs.mkstemp()
49 49 try:
50 50 return os.fstat(tmpfd).st_mtime
51 51 finally:
52 52 os.close(tmpfd)
53 53 vfs.unlink(tmpname)
54 54
55 55 def nonnormalentries(dmap):
56 56 '''Compute the nonnormal dirstate entries from the dmap'''
57 57 try:
58 58 return parsers.nonnormalotherparententries(dmap)
59 59 except AttributeError:
60 60 nonnorm = set()
61 61 otherparent = set()
62 62 for fname, e in dmap.iteritems():
63 63 if e[0] != 'n' or e[3] == -1:
64 64 nonnorm.add(fname)
65 65 if e[0] == 'n' and e[2] == -2:
66 66 otherparent.add(fname)
67 67 return nonnorm, otherparent
68 68
69 69 class dirstate(object):
70 70
71 71 def __init__(self, opener, ui, root, validate):
72 72 '''Create a new dirstate object.
73 73
74 74 opener is an open()-like callable that can be used to open the
75 75 dirstate file; root is the root of the directory tracked by
76 76 the dirstate.
77 77 '''
78 78 self._opener = opener
79 79 self._validate = validate
80 80 self._root = root
81 81 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
82 82 # UNC path pointing to root share (issue4557)
83 83 self._rootdir = pathutil.normasprefix(root)
84 84 # internal config: ui.forcecwd
85 85 forcecwd = ui.config('ui', 'forcecwd')
86 86 if forcecwd:
87 87 self._cwd = forcecwd
88 88 self._dirty = False
89 89 self._dirtypl = False
90 90 self._lastnormaltime = 0
91 91 self._ui = ui
92 92 self._filecache = {}
93 93 self._parentwriters = 0
94 94 self._filename = 'dirstate'
95 95 self._pendingfilename = '%s.pending' % self._filename
96 96 self._plchangecallbacks = {}
97 97 self._origpl = None
98 98 self._updatedfiles = set()
99 99
100 100 # for consistent view between _pl() and _read() invocations
101 101 self._pendingmode = None
102 102
103 103 @contextlib.contextmanager
104 104 def parentchange(self):
105 105 '''Context manager for handling dirstate parents.
106 106
107 107 If an exception occurs in the scope of the context manager,
108 108 the incoherent dirstate won't be written when wlock is
109 109 released.
110 110 '''
111 111 self._parentwriters += 1
112 112 yield
113 113 # Typically we want the "undo" step of a context manager in a
114 114 # finally block so it happens even when an exception
115 115 # occurs. In this case, however, we only want to decrement
116 116 # parentwriters if the code in the with statement exits
117 117 # normally, so we don't have a try/finally here on purpose.
118 118 self._parentwriters -= 1
119 119
120 120 def beginparentchange(self):
121 121 '''Marks the beginning of a set of changes that involve changing
122 122 the dirstate parents. If there is an exception during this time,
123 123 the dirstate will not be written when the wlock is released. This
124 124 prevents writing an incoherent dirstate where the parent doesn't
125 125 match the contents.
126 126 '''
127 self._ui.deprecwarn('beginparentchange is obsoleted by the '
128 'parentchange context manager.', '4.3')
127 129 self._parentwriters += 1
128 130
129 131 def endparentchange(self):
130 132 '''Marks the end of a set of changes that involve changing the
131 133 dirstate parents. Once all parent changes have been marked done,
132 134 the wlock will be free to write the dirstate on release.
133 135 '''
136 self._ui.deprecwarn('endparentchange is obsoleted by the '
137 'parentchange context manager.', '4.3')
134 138 if self._parentwriters > 0:
135 139 self._parentwriters -= 1
136 140
137 141 def pendingparentchange(self):
138 142 '''Returns true if the dirstate is in the middle of a set of changes
139 143 that modify the dirstate parent.
140 144 '''
141 145 return self._parentwriters > 0
142 146
143 147 @propertycache
144 148 def _map(self):
145 149 '''Return the dirstate contents as a map from filename to
146 150 (state, mode, size, time).'''
147 151 self._read()
148 152 return self._map
149 153
150 154 @propertycache
151 155 def _copymap(self):
152 156 self._read()
153 157 return self._copymap
154 158
155 159 @propertycache
156 160 def _nonnormalset(self):
157 161 nonnorm, otherparents = nonnormalentries(self._map)
158 162 self._otherparentset = otherparents
159 163 return nonnorm
160 164
161 165 @propertycache
162 166 def _otherparentset(self):
163 167 nonnorm, otherparents = nonnormalentries(self._map)
164 168 self._nonnormalset = nonnorm
165 169 return otherparents
166 170
167 171 @propertycache
168 172 def _filefoldmap(self):
169 173 try:
170 174 makefilefoldmap = parsers.make_file_foldmap
171 175 except AttributeError:
172 176 pass
173 177 else:
174 178 return makefilefoldmap(self._map, util.normcasespec,
175 179 util.normcasefallback)
176 180
177 181 f = {}
178 182 normcase = util.normcase
179 183 for name, s in self._map.iteritems():
180 184 if s[0] != 'r':
181 185 f[normcase(name)] = name
182 186 f['.'] = '.' # prevents useless util.fspath() invocation
183 187 return f
184 188
185 189 @propertycache
186 190 def _dirfoldmap(self):
187 191 f = {}
188 192 normcase = util.normcase
189 193 for name in self._dirs:
190 194 f[normcase(name)] = name
191 195 return f
192 196
193 197 @repocache('branch')
194 198 def _branch(self):
195 199 try:
196 200 return self._opener.read("branch").strip() or "default"
197 201 except IOError as inst:
198 202 if inst.errno != errno.ENOENT:
199 203 raise
200 204 return "default"
201 205
202 206 @propertycache
203 207 def _pl(self):
204 208 try:
205 209 fp = self._opendirstatefile()
206 210 st = fp.read(40)
207 211 fp.close()
208 212 l = len(st)
209 213 if l == 40:
210 214 return st[:20], st[20:40]
211 215 elif l > 0 and l < 40:
212 216 raise error.Abort(_('working directory state appears damaged!'))
213 217 except IOError as err:
214 218 if err.errno != errno.ENOENT:
215 219 raise
216 220 return [nullid, nullid]
217 221
218 222 @propertycache
219 223 def _dirs(self):
220 224 return util.dirs(self._map, 'r')
221 225
222 226 def dirs(self):
223 227 return self._dirs
224 228
225 229 @rootcache('.hgignore')
226 230 def _ignore(self):
227 231 files = self._ignorefiles()
228 232 if not files:
229 233 return util.never
230 234
231 235 pats = ['include:%s' % f for f in files]
232 236 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
233 237
234 238 @propertycache
235 239 def _slash(self):
236 240 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
237 241
238 242 @propertycache
239 243 def _checklink(self):
240 244 return util.checklink(self._root)
241 245
242 246 @propertycache
243 247 def _checkexec(self):
244 248 return util.checkexec(self._root)
245 249
246 250 @propertycache
247 251 def _checkcase(self):
248 252 return not util.fscasesensitive(self._join('.hg'))
249 253
250 254 def _join(self, f):
251 255 # much faster than os.path.join()
252 256 # it's safe because f is always a relative path
253 257 return self._rootdir + f
254 258
255 259 def flagfunc(self, buildfallback):
256 260 if self._checklink and self._checkexec:
257 261 def f(x):
258 262 try:
259 263 st = os.lstat(self._join(x))
260 264 if util.statislink(st):
261 265 return 'l'
262 266 if util.statisexec(st):
263 267 return 'x'
264 268 except OSError:
265 269 pass
266 270 return ''
267 271 return f
268 272
269 273 fallback = buildfallback()
270 274 if self._checklink:
271 275 def f(x):
272 276 if os.path.islink(self._join(x)):
273 277 return 'l'
274 278 if 'x' in fallback(x):
275 279 return 'x'
276 280 return ''
277 281 return f
278 282 if self._checkexec:
279 283 def f(x):
280 284 if 'l' in fallback(x):
281 285 return 'l'
282 286 if util.isexec(self._join(x)):
283 287 return 'x'
284 288 return ''
285 289 return f
286 290 else:
287 291 return fallback
288 292
289 293 @propertycache
290 294 def _cwd(self):
291 295 return pycompat.getcwd()
292 296
293 297 def getcwd(self):
294 298 '''Return the path from which a canonical path is calculated.
295 299
296 300 This path should be used to resolve file patterns or to convert
297 301 canonical paths back to file paths for display. It shouldn't be
298 302 used to get real file paths. Use vfs functions instead.
299 303 '''
300 304 cwd = self._cwd
301 305 if cwd == self._root:
302 306 return ''
303 307 # self._root ends with a path separator if self._root is '/' or 'C:\'
304 308 rootsep = self._root
305 309 if not util.endswithsep(rootsep):
306 310 rootsep += pycompat.ossep
307 311 if cwd.startswith(rootsep):
308 312 return cwd[len(rootsep):]
309 313 else:
310 314 # we're outside the repo. return an absolute path.
311 315 return cwd
312 316
313 317 def pathto(self, f, cwd=None):
314 318 if cwd is None:
315 319 cwd = self.getcwd()
316 320 path = util.pathto(self._root, cwd, f)
317 321 if self._slash:
318 322 return util.pconvert(path)
319 323 return path
320 324
321 325 def __getitem__(self, key):
322 326 '''Return the current state of key (a filename) in the dirstate.
323 327
324 328 States are:
325 329 n normal
326 330 m needs merging
327 331 r marked for removal
328 332 a marked for addition
329 333 ? not tracked
330 334 '''
331 335 return self._map.get(key, ("?",))[0]
332 336
333 337 def __contains__(self, key):
334 338 return key in self._map
335 339
336 340 def __iter__(self):
337 341 for x in sorted(self._map):
338 342 yield x
339 343
340 344 def iteritems(self):
341 345 return self._map.iteritems()
342 346
343 347 def parents(self):
344 348 return [self._validate(p) for p in self._pl]
345 349
346 350 def p1(self):
347 351 return self._validate(self._pl[0])
348 352
349 353 def p2(self):
350 354 return self._validate(self._pl[1])
351 355
352 356 def branch(self):
353 357 return encoding.tolocal(self._branch)
354 358
355 359 def setparents(self, p1, p2=nullid):
356 360 """Set dirstate parents to p1 and p2.
357 361
358 362 When moving from two parents to one, 'm' merged entries are
359 363 adjusted to normal and previous copy records discarded and
360 364 returned by the call.
361 365
362 366 See localrepo.setparents()
363 367 """
364 368 if self._parentwriters == 0:
365 369 raise ValueError("cannot set dirstate parent without "
366 370 "calling dirstate.beginparentchange")
367 371
368 372 self._dirty = self._dirtypl = True
369 373 oldp2 = self._pl[1]
370 374 if self._origpl is None:
371 375 self._origpl = self._pl
372 376 self._pl = p1, p2
373 377 copies = {}
374 378 if oldp2 != nullid and p2 == nullid:
375 379 candidatefiles = self._nonnormalset.union(self._otherparentset)
376 380 for f in candidatefiles:
377 381 s = self._map.get(f)
378 382 if s is None:
379 383 continue
380 384
381 385 # Discard 'm' markers when moving away from a merge state
382 386 if s[0] == 'm':
383 387 if f in self._copymap:
384 388 copies[f] = self._copymap[f]
385 389 self.normallookup(f)
386 390 # Also fix up otherparent markers
387 391 elif s[0] == 'n' and s[2] == -2:
388 392 if f in self._copymap:
389 393 copies[f] = self._copymap[f]
390 394 self.add(f)
391 395 return copies
392 396
393 397 def setbranch(self, branch):
394 398 self._branch = encoding.fromlocal(branch)
395 399 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
396 400 try:
397 401 f.write(self._branch + '\n')
398 402 f.close()
399 403
400 404 # make sure filecache has the correct stat info for _branch after
401 405 # replacing the underlying file
402 406 ce = self._filecache['_branch']
403 407 if ce:
404 408 ce.refresh()
405 409 except: # re-raises
406 410 f.discard()
407 411 raise
408 412
409 413 def _opendirstatefile(self):
410 414 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
411 415 if self._pendingmode is not None and self._pendingmode != mode:
412 416 fp.close()
413 417 raise error.Abort(_('working directory state may be '
414 418 'changed parallelly'))
415 419 self._pendingmode = mode
416 420 return fp
417 421
418 422 def _read(self):
419 423 self._map = {}
420 424 self._copymap = {}
421 425 try:
422 426 fp = self._opendirstatefile()
423 427 try:
424 428 st = fp.read()
425 429 finally:
426 430 fp.close()
427 431 except IOError as err:
428 432 if err.errno != errno.ENOENT:
429 433 raise
430 434 return
431 435 if not st:
432 436 return
433 437
434 438 if util.safehasattr(parsers, 'dict_new_presized'):
435 439 # Make an estimate of the number of files in the dirstate based on
436 440 # its size. From a linear regression on a set of real-world repos,
437 441 # all over 10,000 files, the size of a dirstate entry is 85
438 442 # bytes. The cost of resizing is significantly higher than the cost
439 443 # of filling in a larger presized dict, so subtract 20% from the
440 444 # size.
441 445 #
442 446 # This heuristic is imperfect in many ways, so in a future dirstate
443 447 # format update it makes sense to just record the number of entries
444 448 # on write.
445 449 self._map = parsers.dict_new_presized(len(st) / 71)
446 450
447 451 # Python's garbage collector triggers a GC each time a certain number
448 452 # of container objects (the number being defined by
449 453 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
450 454 # for each file in the dirstate. The C version then immediately marks
451 455 # them as not to be tracked by the collector. However, this has no
452 456 # effect on when GCs are triggered, only on what objects the GC looks
453 457 # into. This means that O(number of files) GCs are unavoidable.
454 458 # Depending on when in the process's lifetime the dirstate is parsed,
455 459 # this can get very expensive. As a workaround, disable GC while
456 460 # parsing the dirstate.
457 461 #
458 462 # (we cannot decorate the function directly since it is in a C module)
459 463 parse_dirstate = util.nogc(parsers.parse_dirstate)
460 464 p = parse_dirstate(self._map, self._copymap, st)
461 465 if not self._dirtypl:
462 466 self._pl = p
463 467
464 468 def invalidate(self):
465 469 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
466 470 "_pl", "_dirs", "_ignore", "_nonnormalset",
467 471 "_otherparentset"):
468 472 if a in self.__dict__:
469 473 delattr(self, a)
470 474 self._lastnormaltime = 0
471 475 self._dirty = False
472 476 self._updatedfiles.clear()
473 477 self._parentwriters = 0
474 478 self._origpl = None
475 479
476 480 def copy(self, source, dest):
477 481 """Mark dest as a copy of source. Unmark dest if source is None."""
478 482 if source == dest:
479 483 return
480 484 self._dirty = True
481 485 if source is not None:
482 486 self._copymap[dest] = source
483 487 self._updatedfiles.add(source)
484 488 self._updatedfiles.add(dest)
485 489 elif dest in self._copymap:
486 490 del self._copymap[dest]
487 491 self._updatedfiles.add(dest)
488 492
489 493 def copied(self, file):
490 494 return self._copymap.get(file, None)
491 495
492 496 def copies(self):
493 497 return self._copymap
494 498
495 499 def _droppath(self, f):
496 500 if self[f] not in "?r" and "_dirs" in self.__dict__:
497 501 self._dirs.delpath(f)
498 502
499 503 if "_filefoldmap" in self.__dict__:
500 504 normed = util.normcase(f)
501 505 if normed in self._filefoldmap:
502 506 del self._filefoldmap[normed]
503 507
504 508 self._updatedfiles.add(f)
505 509
506 510 def _addpath(self, f, state, mode, size, mtime):
507 511 oldstate = self[f]
508 512 if state == 'a' or oldstate == 'r':
509 513 scmutil.checkfilename(f)
510 514 if f in self._dirs:
511 515 raise error.Abort(_('directory %r already in dirstate') % f)
512 516 # shadows
513 517 for d in util.finddirs(f):
514 518 if d in self._dirs:
515 519 break
516 520 if d in self._map and self[d] != 'r':
517 521 raise error.Abort(
518 522 _('file %r in dirstate clashes with %r') % (d, f))
519 523 if oldstate in "?r" and "_dirs" in self.__dict__:
520 524 self._dirs.addpath(f)
521 525 self._dirty = True
522 526 self._updatedfiles.add(f)
523 527 self._map[f] = dirstatetuple(state, mode, size, mtime)
524 528 if state != 'n' or mtime == -1:
525 529 self._nonnormalset.add(f)
526 530 if size == -2:
527 531 self._otherparentset.add(f)
528 532
529 533 def normal(self, f):
530 534 '''Mark a file normal and clean.'''
531 535 s = os.lstat(self._join(f))
532 536 mtime = s.st_mtime
533 537 self._addpath(f, 'n', s.st_mode,
534 538 s.st_size & _rangemask, mtime & _rangemask)
535 539 if f in self._copymap:
536 540 del self._copymap[f]
537 541 if f in self._nonnormalset:
538 542 self._nonnormalset.remove(f)
539 543 if mtime > self._lastnormaltime:
540 544 # Remember the most recent modification timeslot for status(),
541 545 # to make sure we won't miss future size-preserving file content
542 546 # modifications that happen within the same timeslot.
543 547 self._lastnormaltime = mtime
544 548
545 549 def normallookup(self, f):
546 550 '''Mark a file normal, but possibly dirty.'''
547 551 if self._pl[1] != nullid and f in self._map:
548 552 # if there is a merge going on and the file was either
549 553 # in state 'm' (-1) or coming from other parent (-2) before
550 554 # being removed, restore that state.
551 555 entry = self._map[f]
552 556 if entry[0] == 'r' and entry[2] in (-1, -2):
553 557 source = self._copymap.get(f)
554 558 if entry[2] == -1:
555 559 self.merge(f)
556 560 elif entry[2] == -2:
557 561 self.otherparent(f)
558 562 if source:
559 563 self.copy(source, f)
560 564 return
561 565 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
562 566 return
563 567 self._addpath(f, 'n', 0, -1, -1)
564 568 if f in self._copymap:
565 569 del self._copymap[f]
566 570 if f in self._nonnormalset:
567 571 self._nonnormalset.remove(f)
568 572
569 573 def otherparent(self, f):
570 574 '''Mark as coming from the other parent, always dirty.'''
571 575 if self._pl[1] == nullid:
572 576 raise error.Abort(_("setting %r to other parent "
573 577 "only allowed in merges") % f)
574 578 if f in self and self[f] == 'n':
575 579 # merge-like
576 580 self._addpath(f, 'm', 0, -2, -1)
577 581 else:
578 582 # add-like
579 583 self._addpath(f, 'n', 0, -2, -1)
580 584
581 585 if f in self._copymap:
582 586 del self._copymap[f]
583 587
584 588 def add(self, f):
585 589 '''Mark a file added.'''
586 590 self._addpath(f, 'a', 0, -1, -1)
587 591 if f in self._copymap:
588 592 del self._copymap[f]
589 593
590 594 def remove(self, f):
591 595 '''Mark a file removed.'''
592 596 self._dirty = True
593 597 self._droppath(f)
594 598 size = 0
595 599 if self._pl[1] != nullid and f in self._map:
596 600 # backup the previous state
597 601 entry = self._map[f]
598 602 if entry[0] == 'm': # merge
599 603 size = -1
600 604 elif entry[0] == 'n' and entry[2] == -2: # other parent
601 605 size = -2
602 606 self._otherparentset.add(f)
603 607 self._map[f] = dirstatetuple('r', 0, size, 0)
604 608 self._nonnormalset.add(f)
605 609 if size == 0 and f in self._copymap:
606 610 del self._copymap[f]
607 611
608 612 def merge(self, f):
609 613 '''Mark a file merged.'''
610 614 if self._pl[1] == nullid:
611 615 return self.normallookup(f)
612 616 return self.otherparent(f)
613 617
614 618 def drop(self, f):
615 619 '''Drop a file from the dirstate'''
616 620 if f in self._map:
617 621 self._dirty = True
618 622 self._droppath(f)
619 623 del self._map[f]
620 624 if f in self._nonnormalset:
621 625 self._nonnormalset.remove(f)
622 626 if f in self._copymap:
623 627 del self._copymap[f]
624 628
625 629 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
626 630 if exists is None:
627 631 exists = os.path.lexists(os.path.join(self._root, path))
628 632 if not exists:
629 633 # Maybe a path component exists
630 634 if not ignoremissing and '/' in path:
631 635 d, f = path.rsplit('/', 1)
632 636 d = self._normalize(d, False, ignoremissing, None)
633 637 folded = d + "/" + f
634 638 else:
635 639 # No path components, preserve original case
636 640 folded = path
637 641 else:
638 642 # recursively normalize leading directory components
639 643 # against dirstate
640 644 if '/' in normed:
641 645 d, f = normed.rsplit('/', 1)
642 646 d = self._normalize(d, False, ignoremissing, True)
643 647 r = self._root + "/" + d
644 648 folded = d + "/" + util.fspath(f, r)
645 649 else:
646 650 folded = util.fspath(normed, self._root)
647 651 storemap[normed] = folded
648 652
649 653 return folded
650 654
651 655 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
652 656 normed = util.normcase(path)
653 657 folded = self._filefoldmap.get(normed, None)
654 658 if folded is None:
655 659 if isknown:
656 660 folded = path
657 661 else:
658 662 folded = self._discoverpath(path, normed, ignoremissing, exists,
659 663 self._filefoldmap)
660 664 return folded
661 665
662 666 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
663 667 normed = util.normcase(path)
664 668 folded = self._filefoldmap.get(normed, None)
665 669 if folded is None:
666 670 folded = self._dirfoldmap.get(normed, None)
667 671 if folded is None:
668 672 if isknown:
669 673 folded = path
670 674 else:
671 675 # store discovered result in dirfoldmap so that future
672 676 # normalizefile calls don't start matching directories
673 677 folded = self._discoverpath(path, normed, ignoremissing, exists,
674 678 self._dirfoldmap)
675 679 return folded
676 680
677 681 def normalize(self, path, isknown=False, ignoremissing=False):
678 682 '''
679 683 normalize the case of a pathname when on a casefolding filesystem
680 684
681 685 isknown specifies whether the filename came from walking the
682 686 disk, to avoid extra filesystem access.
683 687
684 688 If ignoremissing is True, missing paths are returned
685 689 unchanged. Otherwise, we try harder to normalize possibly
686 690 existing path components.
687 691
688 692 The normalized case is determined based on the following precedence:
689 693
690 694 - version of name already stored in the dirstate
691 695 - version of name stored on disk
692 696 - version provided via command arguments
693 697 '''
694 698
695 699 if self._checkcase:
696 700 return self._normalize(path, isknown, ignoremissing)
697 701 return path
698 702
699 703 def clear(self):
700 704 self._map = {}
701 705 self._nonnormalset = set()
702 706 self._otherparentset = set()
703 707 if "_dirs" in self.__dict__:
704 708 delattr(self, "_dirs")
705 709 self._copymap = {}
706 710 self._pl = [nullid, nullid]
707 711 self._lastnormaltime = 0
708 712 self._updatedfiles.clear()
709 713 self._dirty = True
710 714
711 715 def rebuild(self, parent, allfiles, changedfiles=None):
712 716 if changedfiles is None:
713 717 # Rebuild entire dirstate
714 718 changedfiles = allfiles
715 719 lastnormaltime = self._lastnormaltime
716 720 self.clear()
717 721 self._lastnormaltime = lastnormaltime
718 722
719 723 if self._origpl is None:
720 724 self._origpl = self._pl
721 725 self._pl = (parent, nullid)
722 726 for f in changedfiles:
723 727 if f in allfiles:
724 728 self.normallookup(f)
725 729 else:
726 730 self.drop(f)
727 731
728 732 self._dirty = True
729 733
730 734 def write(self, tr):
731 735 if not self._dirty:
732 736 return
733 737
734 738 filename = self._filename
735 739 if tr:
736 740 # 'dirstate.write()' is not only for writing in-memory
737 741 # changes out, but also for dropping ambiguous timestamp.
738 742 # delayed writing re-raise "ambiguous timestamp issue".
739 743 # See also the wiki page below for detail:
740 744 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
741 745
742 746 # emulate dropping timestamp in 'parsers.pack_dirstate'
743 747 now = _getfsnow(self._opener)
744 748 dmap = self._map
745 749 for f in self._updatedfiles:
746 750 e = dmap.get(f)
747 751 if e is not None and e[0] == 'n' and e[3] == now:
748 752 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
749 753 self._nonnormalset.add(f)
750 754
751 755 # emulate that all 'dirstate.normal' results are written out
752 756 self._lastnormaltime = 0
753 757 self._updatedfiles.clear()
754 758
755 759 # delay writing in-memory changes out
756 760 tr.addfilegenerator('dirstate', (self._filename,),
757 761 self._writedirstate, location='plain')
758 762 return
759 763
760 764 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
761 765 self._writedirstate(st)
762 766
763 767 def addparentchangecallback(self, category, callback):
764 768 """add a callback to be called when the wd parents are changed
765 769
766 770 Callback will be called with the following arguments:
767 771 dirstate, (oldp1, oldp2), (newp1, newp2)
768 772
769 773 Category is a unique identifier to allow overwriting an old callback
770 774 with a newer callback.
771 775 """
772 776 self._plchangecallbacks[category] = callback
773 777
774 778 def _writedirstate(self, st):
775 779 # notify callbacks about parents change
776 780 if self._origpl is not None and self._origpl != self._pl:
777 781 for c, callback in sorted(self._plchangecallbacks.iteritems()):
778 782 callback(self, self._origpl, self._pl)
779 783 self._origpl = None
780 784 # use the modification time of the newly created temporary file as the
781 785 # filesystem's notion of 'now'
782 786 now = util.fstat(st).st_mtime & _rangemask
783 787
784 788 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
785 789 # timestamp of each entry in dirstate, because of 'now > mtime'
786 790 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
787 791 if delaywrite > 0:
788 792 # do we have any files to delay for?
789 793 for f, e in self._map.iteritems():
790 794 if e[0] == 'n' and e[3] == now:
791 795 import time # to avoid useless import
792 796 # rather than sleep n seconds, sleep until the next
793 797 # multiple of n seconds
794 798 clock = time.time()
795 799 start = int(clock) - (int(clock) % delaywrite)
796 800 end = start + delaywrite
797 801 time.sleep(end - clock)
798 802 now = end # trust our estimate that the end is near now
799 803 break
800 804
801 805 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
802 806 self._nonnormalset, self._otherparentset = nonnormalentries(self._map)
803 807 st.close()
804 808 self._lastnormaltime = 0
805 809 self._dirty = self._dirtypl = False
806 810
807 811 def _dirignore(self, f):
808 812 if f == '.':
809 813 return False
810 814 if self._ignore(f):
811 815 return True
812 816 for p in util.finddirs(f):
813 817 if self._ignore(p):
814 818 return True
815 819 return False
816 820
817 821 def _ignorefiles(self):
818 822 files = []
819 823 if os.path.exists(self._join('.hgignore')):
820 824 files.append(self._join('.hgignore'))
821 825 for name, path in self._ui.configitems("ui"):
822 826 if name == 'ignore' or name.startswith('ignore.'):
823 827 # we need to use os.path.join here rather than self._join
824 828 # because path is arbitrary and user-specified
825 829 files.append(os.path.join(self._rootdir, util.expandpath(path)))
826 830 return files
827 831
828 832 def _ignorefileandline(self, f):
829 833 files = collections.deque(self._ignorefiles())
830 834 visited = set()
831 835 while files:
832 836 i = files.popleft()
833 837 patterns = matchmod.readpatternfile(i, self._ui.warn,
834 838 sourceinfo=True)
835 839 for pattern, lineno, line in patterns:
836 840 kind, p = matchmod._patsplit(pattern, 'glob')
837 841 if kind == "subinclude":
838 842 if p not in visited:
839 843 files.append(p)
840 844 continue
841 845 m = matchmod.match(self._root, '', [], [pattern],
842 846 warn=self._ui.warn)
843 847 if m(f):
844 848 return (i, lineno, line)
845 849 visited.add(i)
846 850 return (None, -1, "")
847 851
848 852 def _walkexplicit(self, match, subrepos):
849 853 '''Get stat data about the files explicitly specified by match.
850 854
851 855 Return a triple (results, dirsfound, dirsnotfound).
852 856 - results is a mapping from filename to stat result. It also contains
853 857 listings mapping subrepos and .hg to None.
854 858 - dirsfound is a list of files found to be directories.
855 859 - dirsnotfound is a list of files that the dirstate thinks are
856 860 directories and that were not found.'''
857 861
858 862 def badtype(mode):
859 863 kind = _('unknown')
860 864 if stat.S_ISCHR(mode):
861 865 kind = _('character device')
862 866 elif stat.S_ISBLK(mode):
863 867 kind = _('block device')
864 868 elif stat.S_ISFIFO(mode):
865 869 kind = _('fifo')
866 870 elif stat.S_ISSOCK(mode):
867 871 kind = _('socket')
868 872 elif stat.S_ISDIR(mode):
869 873 kind = _('directory')
870 874 return _('unsupported file type (type is %s)') % kind
871 875
872 876 matchedir = match.explicitdir
873 877 badfn = match.bad
874 878 dmap = self._map
875 879 lstat = os.lstat
876 880 getkind = stat.S_IFMT
877 881 dirkind = stat.S_IFDIR
878 882 regkind = stat.S_IFREG
879 883 lnkkind = stat.S_IFLNK
880 884 join = self._join
881 885 dirsfound = []
882 886 foundadd = dirsfound.append
883 887 dirsnotfound = []
884 888 notfoundadd = dirsnotfound.append
885 889
886 890 if not match.isexact() and self._checkcase:
887 891 normalize = self._normalize
888 892 else:
889 893 normalize = None
890 894
891 895 files = sorted(match.files())
892 896 subrepos.sort()
893 897 i, j = 0, 0
894 898 while i < len(files) and j < len(subrepos):
895 899 subpath = subrepos[j] + "/"
896 900 if files[i] < subpath:
897 901 i += 1
898 902 continue
899 903 while i < len(files) and files[i].startswith(subpath):
900 904 del files[i]
901 905 j += 1
902 906
903 907 if not files or '.' in files:
904 908 files = ['.']
905 909 results = dict.fromkeys(subrepos)
906 910 results['.hg'] = None
907 911
908 912 alldirs = None
909 913 for ff in files:
910 914 # constructing the foldmap is expensive, so don't do it for the
911 915 # common case where files is ['.']
912 916 if normalize and ff != '.':
913 917 nf = normalize(ff, False, True)
914 918 else:
915 919 nf = ff
916 920 if nf in results:
917 921 continue
918 922
919 923 try:
920 924 st = lstat(join(nf))
921 925 kind = getkind(st.st_mode)
922 926 if kind == dirkind:
923 927 if nf in dmap:
924 928 # file replaced by dir on disk but still in dirstate
925 929 results[nf] = None
926 930 if matchedir:
927 931 matchedir(nf)
928 932 foundadd((nf, ff))
929 933 elif kind == regkind or kind == lnkkind:
930 934 results[nf] = st
931 935 else:
932 936 badfn(ff, badtype(kind))
933 937 if nf in dmap:
934 938 results[nf] = None
935 939 except OSError as inst: # nf not found on disk - it is dirstate only
936 940 if nf in dmap: # does it exactly match a missing file?
937 941 results[nf] = None
938 942 else: # does it match a missing directory?
939 943 if alldirs is None:
940 944 alldirs = util.dirs(dmap)
941 945 if nf in alldirs:
942 946 if matchedir:
943 947 matchedir(nf)
944 948 notfoundadd(nf)
945 949 else:
946 950 badfn(ff, inst.strerror)
947 951
948 952 # Case insensitive filesystems cannot rely on lstat() failing to detect
949 953 # a case-only rename. Prune the stat object for any file that does not
950 954 # match the case in the filesystem, if there are multiple files that
951 955 # normalize to the same path.
952 956 if match.isexact() and self._checkcase:
953 957 normed = {}
954 958
955 959 for f, st in results.iteritems():
956 960 if st is None:
957 961 continue
958 962
959 963 nc = util.normcase(f)
960 964 paths = normed.get(nc)
961 965
962 966 if paths is None:
963 967 paths = set()
964 968 normed[nc] = paths
965 969
966 970 paths.add(f)
967 971
968 972 for norm, paths in normed.iteritems():
969 973 if len(paths) > 1:
970 974 for path in paths:
971 975 folded = self._discoverpath(path, norm, True, None,
972 976 self._dirfoldmap)
973 977 if path != folded:
974 978 results[path] = None
975 979
976 980 return results, dirsfound, dirsnotfound
977 981
978 982 def walk(self, match, subrepos, unknown, ignored, full=True):
979 983 '''
980 984 Walk recursively through the directory tree, finding all files
981 985 matched by match.
982 986
983 987 If full is False, maybe skip some known-clean files.
984 988
985 989 Return a dict mapping filename to stat-like object (either
986 990 mercurial.osutil.stat instance or return value of os.stat()).
987 991
988 992 '''
989 993 # full is a flag that extensions that hook into walk can use -- this
990 994 # implementation doesn't use it at all. This satisfies the contract
991 995 # because we only guarantee a "maybe".
992 996
993 997 if ignored:
994 998 ignore = util.never
995 999 dirignore = util.never
996 1000 elif unknown:
997 1001 ignore = self._ignore
998 1002 dirignore = self._dirignore
999 1003 else:
1000 1004 # if not unknown and not ignored, drop dir recursion and step 2
1001 1005 ignore = util.always
1002 1006 dirignore = util.always
1003 1007
1004 1008 matchfn = match.matchfn
1005 1009 matchalways = match.always()
1006 1010 matchtdir = match.traversedir
1007 1011 dmap = self._map
1008 1012 listdir = util.listdir
1009 1013 lstat = os.lstat
1010 1014 dirkind = stat.S_IFDIR
1011 1015 regkind = stat.S_IFREG
1012 1016 lnkkind = stat.S_IFLNK
1013 1017 join = self._join
1014 1018
1015 1019 exact = skipstep3 = False
1016 1020 if match.isexact(): # match.exact
1017 1021 exact = True
1018 1022 dirignore = util.always # skip step 2
1019 1023 elif match.prefix(): # match.match, no patterns
1020 1024 skipstep3 = True
1021 1025
1022 1026 if not exact and self._checkcase:
1023 1027 normalize = self._normalize
1024 1028 normalizefile = self._normalizefile
1025 1029 skipstep3 = False
1026 1030 else:
1027 1031 normalize = self._normalize
1028 1032 normalizefile = None
1029 1033
1030 1034 # step 1: find all explicit files
1031 1035 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1032 1036
1033 1037 skipstep3 = skipstep3 and not (work or dirsnotfound)
1034 1038 work = [d for d in work if not dirignore(d[0])]
1035 1039
1036 1040 # step 2: visit subdirectories
1037 1041 def traverse(work, alreadynormed):
1038 1042 wadd = work.append
1039 1043 while work:
1040 1044 nd = work.pop()
1041 1045 if not match.visitdir(nd):
1042 1046 continue
1043 1047 skip = None
1044 1048 if nd == '.':
1045 1049 nd = ''
1046 1050 else:
1047 1051 skip = '.hg'
1048 1052 try:
1049 1053 entries = listdir(join(nd), stat=True, skip=skip)
1050 1054 except OSError as inst:
1051 1055 if inst.errno in (errno.EACCES, errno.ENOENT):
1052 1056 match.bad(self.pathto(nd), inst.strerror)
1053 1057 continue
1054 1058 raise
1055 1059 for f, kind, st in entries:
1056 1060 if normalizefile:
1057 1061 # even though f might be a directory, we're only
1058 1062 # interested in comparing it to files currently in the
1059 1063 # dmap -- therefore normalizefile is enough
1060 1064 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1061 1065 True)
1062 1066 else:
1063 1067 nf = nd and (nd + "/" + f) or f
1064 1068 if nf not in results:
1065 1069 if kind == dirkind:
1066 1070 if not ignore(nf):
1067 1071 if matchtdir:
1068 1072 matchtdir(nf)
1069 1073 wadd(nf)
1070 1074 if nf in dmap and (matchalways or matchfn(nf)):
1071 1075 results[nf] = None
1072 1076 elif kind == regkind or kind == lnkkind:
1073 1077 if nf in dmap:
1074 1078 if matchalways or matchfn(nf):
1075 1079 results[nf] = st
1076 1080 elif ((matchalways or matchfn(nf))
1077 1081 and not ignore(nf)):
1078 1082 # unknown file -- normalize if necessary
1079 1083 if not alreadynormed:
1080 1084 nf = normalize(nf, False, True)
1081 1085 results[nf] = st
1082 1086 elif nf in dmap and (matchalways or matchfn(nf)):
1083 1087 results[nf] = None
1084 1088
1085 1089 for nd, d in work:
1086 1090 # alreadynormed means that processwork doesn't have to do any
1087 1091 # expensive directory normalization
1088 1092 alreadynormed = not normalize or nd == d
1089 1093 traverse([d], alreadynormed)
1090 1094
1091 1095 for s in subrepos:
1092 1096 del results[s]
1093 1097 del results['.hg']
1094 1098
1095 1099 # step 3: visit remaining files from dmap
1096 1100 if not skipstep3 and not exact:
1097 1101 # If a dmap file is not in results yet, it was either
1098 1102 # a) not matching matchfn b) ignored, c) missing, or d) under a
1099 1103 # symlink directory.
1100 1104 if not results and matchalways:
1101 1105 visit = [f for f in dmap]
1102 1106 else:
1103 1107 visit = [f for f in dmap if f not in results and matchfn(f)]
1104 1108 visit.sort()
1105 1109
1106 1110 if unknown:
1107 1111 # unknown == True means we walked all dirs under the roots
1108 1112 # that weren't ignored, and everything that matched was stat'ed
1109 1113 # and is already in results.
1110 1114 # The rest must thus be ignored or under a symlink.
1111 1115 audit_path = pathutil.pathauditor(self._root)
1112 1116
1113 1117 for nf in iter(visit):
1114 1118 # If a stat for the same file was already added with a
1115 1119 # different case, don't add one for this, since that would
1116 1120 # make it appear as if the file exists under both names
1117 1121 # on disk.
1118 1122 if (normalizefile and
1119 1123 normalizefile(nf, True, True) in results):
1120 1124 results[nf] = None
1121 1125 # Report ignored items in the dmap as long as they are not
1122 1126 # under a symlink directory.
1123 1127 elif audit_path.check(nf):
1124 1128 try:
1125 1129 results[nf] = lstat(join(nf))
1126 1130 # file was just ignored, no links, and exists
1127 1131 except OSError:
1128 1132 # file doesn't exist
1129 1133 results[nf] = None
1130 1134 else:
1131 1135 # It's either missing or under a symlink directory
1132 1136 # which we in this case report as missing
1133 1137 results[nf] = None
1134 1138 else:
1135 1139 # We may not have walked the full directory tree above,
1136 1140 # so stat and check everything we missed.
1137 1141 iv = iter(visit)
1138 1142 for st in util.statfiles([join(i) for i in visit]):
1139 1143 results[next(iv)] = st
1140 1144 return results
1141 1145
1142 1146 def status(self, match, subrepos, ignored, clean, unknown):
1143 1147 '''Determine the status of the working copy relative to the
1144 1148 dirstate and return a pair of (unsure, status), where status is of type
1145 1149 scmutil.status and:
1146 1150
1147 1151 unsure:
1148 1152 files that might have been modified since the dirstate was
1149 1153 written, but need to be read to be sure (size is the same
1150 1154 but mtime differs)
1151 1155 status.modified:
1152 1156 files that have definitely been modified since the dirstate
1153 1157 was written (different size or mode)
1154 1158 status.clean:
1155 1159 files that have definitely not been modified since the
1156 1160 dirstate was written
1157 1161 '''
1158 1162 listignored, listclean, listunknown = ignored, clean, unknown
1159 1163 lookup, modified, added, unknown, ignored = [], [], [], [], []
1160 1164 removed, deleted, clean = [], [], []
1161 1165
1162 1166 dmap = self._map
1163 1167 ladd = lookup.append # aka "unsure"
1164 1168 madd = modified.append
1165 1169 aadd = added.append
1166 1170 uadd = unknown.append
1167 1171 iadd = ignored.append
1168 1172 radd = removed.append
1169 1173 dadd = deleted.append
1170 1174 cadd = clean.append
1171 1175 mexact = match.exact
1172 1176 dirignore = self._dirignore
1173 1177 checkexec = self._checkexec
1174 1178 copymap = self._copymap
1175 1179 lastnormaltime = self._lastnormaltime
1176 1180
1177 1181 # We need to do full walks when either
1178 1182 # - we're listing all clean files, or
1179 1183 # - match.traversedir does something, because match.traversedir should
1180 1184 # be called for every dir in the working dir
1181 1185 full = listclean or match.traversedir is not None
1182 1186 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1183 1187 full=full).iteritems():
1184 1188 if fn not in dmap:
1185 1189 if (listignored or mexact(fn)) and dirignore(fn):
1186 1190 if listignored:
1187 1191 iadd(fn)
1188 1192 else:
1189 1193 uadd(fn)
1190 1194 continue
1191 1195
1192 1196 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1193 1197 # written like that for performance reasons. dmap[fn] is not a
1194 1198 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1195 1199 # opcode has fast paths when the value to be unpacked is a tuple or
1196 1200 # a list, but falls back to creating a full-fledged iterator in
1197 1201 # general. That is much slower than simply accessing and storing the
1198 1202 # tuple members one by one.
1199 1203 t = dmap[fn]
1200 1204 state = t[0]
1201 1205 mode = t[1]
1202 1206 size = t[2]
1203 1207 time = t[3]
1204 1208
1205 1209 if not st and state in "nma":
1206 1210 dadd(fn)
1207 1211 elif state == 'n':
1208 1212 if (size >= 0 and
1209 1213 ((size != st.st_size and size != st.st_size & _rangemask)
1210 1214 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1211 1215 or size == -2 # other parent
1212 1216 or fn in copymap):
1213 1217 madd(fn)
1214 1218 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1215 1219 ladd(fn)
1216 1220 elif st.st_mtime == lastnormaltime:
1217 1221 # fn may have just been marked as normal and it may have
1218 1222 # changed in the same second without changing its size.
1219 1223 # This can happen if we quickly do multiple commits.
1220 1224 # Force lookup, so we don't miss such a racy file change.
1221 1225 ladd(fn)
1222 1226 elif listclean:
1223 1227 cadd(fn)
1224 1228 elif state == 'm':
1225 1229 madd(fn)
1226 1230 elif state == 'a':
1227 1231 aadd(fn)
1228 1232 elif state == 'r':
1229 1233 radd(fn)
1230 1234
1231 1235 return (lookup, scmutil.status(modified, added, removed, deleted,
1232 1236 unknown, ignored, clean))
1233 1237
1234 1238 def matches(self, match):
1235 1239 '''
1236 1240 return files in the dirstate (in whatever state) filtered by match
1237 1241 '''
1238 1242 dmap = self._map
1239 1243 if match.always():
1240 1244 return dmap.keys()
1241 1245 files = match.files()
1242 1246 if match.isexact():
1243 1247 # fast path -- filter the other way around, since typically files is
1244 1248 # much smaller than dmap
1245 1249 return [f for f in files if f in dmap]
1246 1250 if match.prefix() and all(fn in dmap for fn in files):
1247 1251 # fast path -- all the values are known to be files, so just return
1248 1252 # that
1249 1253 return list(files)
1250 1254 return [f for f in dmap if match(f)]
1251 1255
1252 1256 def _actualfilename(self, tr):
1253 1257 if tr:
1254 1258 return self._pendingfilename
1255 1259 else:
1256 1260 return self._filename
1257 1261
1258 1262 def savebackup(self, tr, suffix='', prefix=''):
1259 1263 '''Save current dirstate into backup file with suffix'''
1260 1264 assert len(suffix) > 0 or len(prefix) > 0
1261 1265 filename = self._actualfilename(tr)
1262 1266
1263 1267 # use '_writedirstate' instead of 'write' to write changes certainly,
1264 1268 # because the latter omits writing out if transaction is running.
1265 1269 # output file will be used to create backup of dirstate at this point.
1266 1270 if self._dirty or not self._opener.exists(filename):
1267 1271 self._writedirstate(self._opener(filename, "w", atomictemp=True,
1268 1272 checkambig=True))
1269 1273
1270 1274 if tr:
1271 1275 # ensure that subsequent tr.writepending returns True for
1272 1276 # changes written out above, even if dirstate is never
1273 1277 # changed after this
1274 1278 tr.addfilegenerator('dirstate', (self._filename,),
1275 1279 self._writedirstate, location='plain')
1276 1280
1277 1281 # ensure that pending file written above is unlinked at
1278 1282 # failure, even if tr.writepending isn't invoked until the
1279 1283 # end of this transaction
1280 1284 tr.registertmp(filename, location='plain')
1281 1285
1282 1286 backupname = prefix + self._filename + suffix
1283 1287 assert backupname != filename
1284 1288 self._opener.tryunlink(backupname)
1285 1289 # hardlink backup is okay because _writedirstate is always called
1286 1290 # with an "atomictemp=True" file.
1287 1291 util.copyfile(self._opener.join(filename),
1288 1292 self._opener.join(backupname), hardlink=True)
1289 1293
1290 1294 def restorebackup(self, tr, suffix='', prefix=''):
1291 1295 '''Restore dirstate by backup file with suffix'''
1292 1296 assert len(suffix) > 0 or len(prefix) > 0
1293 1297 # this "invalidate()" prevents "wlock.release()" from writing
1294 1298 # changes of dirstate out after restoring from backup file
1295 1299 self.invalidate()
1296 1300 filename = self._actualfilename(tr)
1297 1301 # using self._filename to avoid having "pending" in the backup filename
1298 1302 self._opener.rename(prefix + self._filename + suffix, filename,
1299 1303 checkambig=True)
1300 1304
1301 1305 def clearbackup(self, tr, suffix='', prefix=''):
1302 1306 '''Clear backup file with suffix'''
1303 1307 assert len(suffix) > 0 or len(prefix) > 0
1304 1308 # using self._filename to avoid having "pending" in the backup filename
1305 1309 self._opener.unlink(prefix + self._filename + suffix)
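As the comments inside parentchange explain, the decrement after the yield is deliberately not placed in a finally block: if the body of the with statement raises, _parentwriters stays elevated, pendingparentchange() keeps returning True, and the incoherent dirstate is not written when the wlock is released. A standalone toy sketch of that behaviour (illustrative only, not Mercurial code):

    import contextlib

    class toydirstate(object):
        def __init__(self):
            self._parentwriters = 0

        @contextlib.contextmanager
        def parentchange(self):
            self._parentwriters += 1
            yield
            # runs only on a clean exit, mirroring the real implementation
            self._parentwriters -= 1

        def pendingparentchange(self):
            return self._parentwriters > 0

    ds = toydirstate()
    try:
        with ds.parentchange():
            raise RuntimeError('simulated failure while setting parents')
    except RuntimeError:
        pass
    assert ds.pendingparentchange()  # still pending, so a write would be skipped

The deprecated endparentchange keeps the same guard (it only decrements when the counter is positive), so callers mixing the old and new styles cannot drive the counter negative.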