Skip to main content

Export Python Code Sample - Checks

Updated over a year ago

A sample Python script that exports Checks JSON data from the API to a CSV file.

import requests
import pandas as pd
from datetime import datetime

# HTTP headers sent with every API request.
# Replace the placeholder values with your own credentials before running
# (the original sample left these values blank, which is a syntax error).
HEADERS = {
    'Accept': 'application/json',
    'CLIENT-ID': 'YOUR_CLIENT_ID',                    # client id from your account settings
    'AUTHORIZATION': 'apikey USERNAME:YOUR_API_KEY',  # format: "apikey <username>:<api_key>"
}

# Number of checks per API page (the endpoint returns pages of 50).
_PAGE_SIZE = 50

# Give up on a page after this many consecutive failures.
# (The original sample retried the same page forever, which could loop
# infinitely on a persistent error.)
_MAX_RETRIES = 5

# Columns to keep in the CSV output, in this order.
_COLUMNS_ORDER = [
    'id', 'external_id', 'pdf_url', 'created_date', 'updated_date', 'date', 'memo',
    'amount', 'currency_code', 'amount_text', 'payer_name', 'payer_address',
    'receiver_name', 'receiver_address', 'bank_name', 'bank_address', 'check_number',
    'micr_routing_number', 'micr_account_number', 'micr_serial_number', 'micr_raw',
    'fractional_routing_number', 'routing_from_fractional', 'meta_external_id', 'meta_tags',
    'meta_page_count', 'meta_has_front_page', 'meta_duplicate_count', 'meta_amount_text_value',
    'is_signed', 'is_endorsed', 'endorsement_is_mobile_or_remote_deposit_only',
    'endorsement_is_signed', 'text', 'custom_fields',
]


def _flatten_check(check):
    """Return a copy of *check* with the nested 'endorsement', 'micr', and
    'meta' dicts flattened into prefixed top-level keys suitable for CSV.
    """
    flat = check.copy()

    # Flatten simple nested dicts: {'micr': {'raw': x}} -> {'micr_raw': x}.
    for field in ('endorsement', 'micr'):
        if flat.get(field):
            for key, value in flat.pop(field).items():
                flat[f'{field}_{key}'] = value

    # 'meta' mixes scalars with lists that need special summarization.
    meta = flat.get('meta')
    if meta:
        for key, value in meta.items():
            if not isinstance(value, (list, dict)):
                flat[f'meta_{key}'] = value
            elif key == 'tags' and isinstance(value, list):
                flat['meta_tags'] = ','.join(str(tag) for tag in value) if value else ''
            elif key == 'pages' and isinstance(value, list):
                # Store only the page count and whether a front page exists.
                flat['meta_page_count'] = len(value)
                flat['meta_has_front_page'] = any(
                    p.get('is_front', False) for p in value if isinstance(p, dict)
                )
            elif key == 'duplicates' and isinstance(value, list):
                flat['meta_duplicate_count'] = len(value)
        del flat['meta']

    return flat


def fetch_data(base_url, start_date, end_date):
    """Download every check created in (start_date, end_date) and write the
    flattened results to checks.csv, one API page at a time.

    Args:
        base_url: Checks list endpoint, e.g. ".../partner/checks/".
        start_date: 'YYYY-MM-DD HH:MM:SS' lower bound (created_date__gt).
        end_date: 'YYYY-MM-DD HH:MM:SS' upper bound (created_date__lt).
    """
    page = 1
    max_pages = None   # unknown until the first successful response
    first_write = True
    attempts = 0       # consecutive failures for the current page
    while max_pages is None or page <= max_pages:
        print(f"page: {page} , max_pages {max_pages}")
        request_url = (
            f"{base_url}?page={page}"
            f"&created_date__gt={start_date}&created_date__lt={end_date}"
        )
        try:
            response = requests.get(request_url, headers=HEADERS, timeout=300)
            if response.status_code != 200:
                print(f"Failed to fetch data: {response.status_code}")
                attempts += 1
                if attempts >= _MAX_RETRIES:
                    print(f"Giving up on page {page} after {attempts} attempts")
                    break
                continue  # retry the same page

            payload = response.json()  # parse once (the original parsed twice)
            checks = payload["results"]
            total_checks = payload["count"]
            print(f"total checks: {total_checks}")
            if not checks:
                break
            if max_pages is None:
                # Ceiling division: a partially filled last page still counts.
                max_pages = -(-total_checks // _PAGE_SIZE)

            df = pd.DataFrame([_flatten_check(check) for check in checks])

            # Reindex every page to the full column list so appended pages
            # always share one column layout. (The original kept only the
            # columns present on each page, which could misalign rows
            # appended without a header.)
            df = df.reindex(columns=_COLUMNS_ORDER)

            if first_write:
                df.to_csv("checks.csv", mode='w', index=False)
                first_write = False
            else:
                df.to_csv("checks.csv", mode='a', header=False, index=False)
            page += 1      # advance only after a successful page
            attempts = 0   # reset the failure counter
        except Exception as e:
            print(f"Failed to retrieve documents from: {e}")
            attempts += 1
            if attempts >= _MAX_RETRIES:
                print(f"Giving up on page {page} after {attempts} attempts")
                break

def main():
    """Entry point: run the checks export for the hard-coded endpoint and window."""
    checks_endpoint = "https://api.veryfi.com/api/v8/partner/checks/"
    # NOTE(review): these dates are placeholders (month/day '00') — fill in a
    # real range before running.
    window_start = '2025-00-00 00:00:00'
    window_end = '2025-00-00 00:00:00'
    fetch_data(checks_endpoint, window_start, window_end)


if __name__ == "__main__":
    main()

Did this answer your question?