Parser must be a string or character stream, not NoneType, csv2ofx  #79

@khizarsultan

Description

Thank you for your attention!
My problem statement: convert a .csv file into an .ofx file using an open-source library.
My proposed solution: use the csv2ofx Python library to perform the conversion.

Description of the CSV file: my .csv file contains the following columns:

  • 'Settlement date'
  • 'Trade date'
  • 'Symbol'
  • 'Name'
  • 'Transaction type'
  • 'Quantity'
  • 'Price'
  • 'Commissions and fees'
  • 'Amount '
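
Since the names above are pasted straight from Python (note the quoting, and the trailing space inside 'Amount '), the header keys can be checked exactly as meza reads them before mapping anything. A minimal sketch, assuming read_csv yields one dict per row and that 'Cleaned1.csv' is the file used further below:

from meza.io import read_csv

# Peek at the first record to see the header names exactly as meza parses them;
# any stray quote or trailing space here must also appear in the mapping keys.
first_record = next(read_csv('Cleaned1.csv'))
print(list(first_record.keys()))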

Mapping: I apply the following custom mapping (since my columns differ from the defaults):

from operator import itemgetter

mapping = {
    'has_header': True,
    'is_split': False,
    'delimiter': ',',
    'amount': itemgetter('Amount'),
    'date': itemgetter('Settlement date'),
    'symbol': itemgetter('Symbol'),
    'type': itemgetter('Transaction type'),
    'payee': itemgetter('Name'),
    'notes': itemgetter('Commissions and fees'),
}
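
As far as I understand csv2ofx, each callable mapping value is applied to the row dict that read_csv yields, so the strings given to itemgetter must match the CSV header names exactly. A quick illustration with an invented row (values are made up):

row = {'Settlement date': '01/15/2020', 'Trade date': '01/13/2020',
       'Symbol': 'ABC', 'Name': 'Example Corp', 'Transaction type': 'Buy',
       'Quantity': '10', 'Price': '5.00', 'Commissions and fees': '0.00',
       'Amount ': '-50.00'}

mapping['date'](row)    # -> '01/15/2020'
mapping['amount'](row)  # -> KeyError, because this row's key is 'Amount ' (trailing space), not 'Amount'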

Code: I wrote the following code to convert the .csv file into .ofx:

import itertools as it

from meza.io import read_csv, IterStringIO
from csv2ofx import utils
from csv2ofx.ofx import OFX

ofx = OFX(mapping)
records = read_csv('Cleaned1.csv')
groups = ofx.gen_groups(records)
trxns = ofx.gen_trxns(groups)
cleaned_trxns = ofx.clean_trxns(trxns)
data = utils.gen_data(cleaned_trxns)
content = it.chain([ofx.header(), ofx.gen_body(data), ofx.footer()])
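
The output is then written with the loop visible at the top of the traceback below; as I understand meza, IterStringIO turns the chained header/body/footer iterable into a byte stream that can be written line by line:

with open('Cleaned_new.ofx', 'wb') as f:
    for line in IterStringIO(content):
        f.write(line)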

Roadblocks:

When I run the code, it throws the following error:

TypeError                                 Traceback (most recent call last)
<ipython-input-389-0d57618f3bd8> in <module>
      1 with open('Cleaned_new.ofx', 'wb') as f:
----> 2     for line in IterStringIO(content):
      3         print(line)
      4         f.write(line)

~\Anaconda\lib\site-packages\meza\io.py in __next__(self)
    106 
    107     def __next__(self):
--> 108         return self._read(next(self.lines))
    109 
    110     def __iter__(self):

~\Anaconda\lib\site-packages\meza\io.py in <genexpr>(.0)
    116         # TODO: what about a csv with embedded newlines?
    117         groups = groupby_line(self.iter)
--> 118         return (g for k, g in groups if k)
    119 
    120     def _read(self, iterable, num=None, newline=True):

~\Anaconda\lib\site-packages\meza\io.py in <genexpr>(.0)
     59 
     60 # pylint: disable=C0103
---> 61 encode = lambda iterable: (s.encode(ENCODING) for s in iterable)
     62 chain = lambda iterable: it.chain.from_iterable(iterable or [])
     63 

~\Desktop\MyML\Work\csv2ofx\csv2ofx\csv2ofx\ofx.py in gen_body(self, data)
    442     def gen_body(self, data):  # noqa: C901
    443         """ Generate the OFX body """
--> 444         for datum in data:
    445             grp = datum['group']
    446 

~\Desktop\MyML\Work\csv2ofx\csv2ofx\csv2ofx\utils.py in gen_data(groups)
    158 def gen_data(groups):
    159     """ Generate the transaction data """
--> 160     for group, main_pos, sorted_trxns in groups:
    161         for pos, trxn in sorted_trxns:
    162             base_data = {

~\Desktop\MyML\Work\csv2ofx\csv2ofx\csv2ofx\__init__.py in clean_trxns(self, groups)
    312             # pylint: disable=cell-var-from-loop
    313             keyfunc = lambda enum: enum[0] != main_pos
--> 314             sorted_trxns = sorted(enumerate(filtered_trxns), key=keyfunc)
    315             yield (grp, main_pos, sorted_trxns)

~\Desktop\MyML\Work\csv2ofx\csv2ofx\csv2ofx\__init__.py in skip_transaction(self, trxn)
    159             True
    160         """
--> 161         return not self.end >= parse(self.get('date', trxn)) >= self.start
    162 
    163     def convert_amount(self, trxn):

~\Anaconda\lib\site-packages\dateutil\parser\_parser.py in parse(timestr, parserinfo, **kwargs)
   1372         return parser(parserinfo).parse(timestr, **kwargs)
   1373     else:
-> 1374         return DEFAULTPARSER.parse(timestr, **kwargs)
   1375 
   1376 

~\Anaconda\lib\site-packages\dateutil\parser\_parser.py in parse(self, timestr, default, ignoretz, tzinfos, **kwargs)
    644                                                       second=0, microsecond=0)
    645 
--> 646         res, skipped_tokens = self._parse(timestr, **kwargs)
    647 
    648         if res is None:

~\Anaconda\lib\site-packages\dateutil\parser\_parser.py in _parse(self, timestr, dayfirst, yearfirst, fuzzy, fuzzy_with_tokens)
    723 
    724         res = self._result()
--> 725         l = _timelex.split(timestr)         # Splits the timestr into tokens
    726 
    727         skipped_idxs = []

~\Anaconda\lib\site-packages\dateutil\parser\_parser.py in split(cls, s)
    205     @classmethod
    206     def split(cls, s):
--> 207         return list(cls(s))
    208 
    209     @classmethod

~\Anaconda\lib\site-packages\dateutil\parser\_parser.py in __init__(self, instream)
     73             instream = StringIO(instream)
     74         elif getattr(instream, 'read', None) is None:
---> 75             raise TypeError('Parser must be a string or character stream, not '
     76                             '{itype}'.format(itype=instream.__class__.__name__))
     77 

TypeError: Parser must be a string or character stream, not NoneType
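
From the frames above, skip_transaction() passes self.get('date', trxn) to dateutil's parse(), and that value is None for at least one transaction. A minimal diagnostic sketch (assuming the mapping and 'Cleaned1.csv' from above are in scope) applies the date getter to every raw row, so any row without a usable 'Settlement date' surfaces before csv2ofx's pipeline runs:

from meza.io import read_csv

for i, row in enumerate(read_csv('Cleaned1.csv')):
    try:
        value = mapping['date'](row)
    except KeyError as exc:
        print(f'row {i}: missing column {exc}; available keys: {list(row)}')
        break
    if not value:
        print(f'row {i}: empty Settlement date in {dict(row)}')
        break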

I really appreciate you taking the time to read my problem. Looking forward to your suggestions or solutions. Thank you.
